llama-cloud 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (129)
  1. llama_cloud/__init__.py +138 -2
  2. llama_cloud/client.py +15 -0
  3. llama_cloud/resources/__init__.py +17 -1
  4. llama_cloud/resources/chat_apps/__init__.py +2 -0
  5. llama_cloud/resources/chat_apps/client.py +630 -0
  6. llama_cloud/resources/data_sinks/client.py +2 -2
  7. llama_cloud/resources/data_sources/client.py +2 -2
  8. llama_cloud/resources/embedding_model_configs/client.py +4 -4
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +21 -0
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/jobs/__init__.py +2 -0
  14. llama_cloud/resources/jobs/client.py +148 -0
  15. llama_cloud/resources/llama_extract/__init__.py +5 -0
  16. llama_cloud/resources/llama_extract/client.py +1038 -0
  17. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  18. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
  19. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
  20. llama_cloud/resources/organizations/client.py +14 -14
  21. llama_cloud/resources/parsing/client.py +480 -229
  22. llama_cloud/resources/pipelines/client.py +182 -126
  23. llama_cloud/resources/projects/client.py +210 -102
  24. llama_cloud/resources/reports/__init__.py +5 -0
  25. llama_cloud/resources/reports/client.py +1198 -0
  26. llama_cloud/resources/reports/types/__init__.py +7 -0
  27. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
  28. llama_cloud/resources/retrievers/__init__.py +2 -0
  29. llama_cloud/resources/retrievers/client.py +654 -0
  30. llama_cloud/types/__init__.py +124 -2
  31. llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +2 -2
  32. llama_cloud/types/chat_app.py +44 -0
  33. llama_cloud/types/chat_app_response.py +41 -0
  34. llama_cloud/types/cloud_az_storage_blob_data_source.py +1 -0
  35. llama_cloud/types/cloud_box_data_source.py +1 -0
  36. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  37. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  38. llama_cloud/types/cloud_jira_data_source.py +1 -0
  39. llama_cloud/types/cloud_notion_page_data_source.py +1 -0
  40. llama_cloud/types/cloud_one_drive_data_source.py +1 -0
  41. llama_cloud/types/cloud_postgres_vector_store.py +1 -0
  42. llama_cloud/types/cloud_s_3_data_source.py +1 -0
  43. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  44. llama_cloud/types/cloud_slack_data_source.py +1 -0
  45. llama_cloud/types/composite_retrieval_mode.py +21 -0
  46. llama_cloud/types/composite_retrieval_result.py +38 -0
  47. llama_cloud/types/composite_retrieved_text_node.py +42 -0
  48. llama_cloud/types/data_sink.py +1 -1
  49. llama_cloud/types/data_sink_create.py +1 -1
  50. llama_cloud/types/data_source.py +1 -1
  51. llama_cloud/types/data_source_create.py +1 -1
  52. llama_cloud/types/edit_suggestion.py +39 -0
  53. llama_cloud/types/eval_dataset_job_record.py +1 -0
  54. llama_cloud/types/extract_agent.py +45 -0
  55. llama_cloud/types/extract_agent_data_schema_value.py +5 -0
  56. llama_cloud/types/extract_config.py +40 -0
  57. llama_cloud/types/extract_job.py +35 -0
  58. llama_cloud/types/extract_job_create.py +40 -0
  59. llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
  60. llama_cloud/types/extract_mode.py +17 -0
  61. llama_cloud/types/extract_resultset.py +46 -0
  62. llama_cloud/types/extract_resultset_data.py +11 -0
  63. llama_cloud/types/extract_resultset_data_item_value.py +7 -0
  64. llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
  65. llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
  66. llama_cloud/types/file.py +3 -0
  67. llama_cloud/types/file_permission_info_value.py +5 -0
  68. llama_cloud/types/filter_condition.py +9 -1
  69. llama_cloud/types/filter_operator.py +4 -0
  70. llama_cloud/types/image_block.py +35 -0
  71. llama_cloud/types/input_message.py +1 -1
  72. llama_cloud/types/job_name_mapping.py +4 -0
  73. llama_cloud/types/job_names.py +89 -0
  74. llama_cloud/types/job_record.py +57 -0
  75. llama_cloud/types/job_record_with_usage_metrics.py +36 -0
  76. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
  77. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
  78. llama_cloud/types/llama_parse_parameters.py +15 -0
  79. llama_cloud/types/llm.py +1 -0
  80. llama_cloud/types/llm_model_data.py +1 -0
  81. llama_cloud/types/llm_parameters.py +1 -0
  82. llama_cloud/types/managed_ingestion_status.py +4 -0
  83. llama_cloud/types/managed_ingestion_status_response.py +1 -0
  84. llama_cloud/types/object_type.py +4 -0
  85. llama_cloud/types/organization.py +5 -0
  86. llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
  87. llama_cloud/types/paginated_report_response.py +35 -0
  88. llama_cloud/types/parse_plan_level.py +21 -0
  89. llama_cloud/types/parsing_job_structured_result.py +32 -0
  90. llama_cloud/types/pipeline_create.py +3 -1
  91. llama_cloud/types/pipeline_data_source.py +1 -1
  92. llama_cloud/types/pipeline_file.py +3 -0
  93. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  94. llama_cloud/types/playground_session.py +2 -2
  95. llama_cloud/types/preset_retrieval_params.py +1 -0
  96. llama_cloud/types/progress_event.py +44 -0
  97. llama_cloud/types/progress_event_status.py +33 -0
  98. llama_cloud/types/prompt_spec.py +2 -2
  99. llama_cloud/types/related_node_info.py +2 -2
  100. llama_cloud/types/related_node_info_node_type.py +7 -0
  101. llama_cloud/types/report.py +33 -0
  102. llama_cloud/types/report_block.py +34 -0
  103. llama_cloud/types/report_block_dependency.py +29 -0
  104. llama_cloud/types/report_create_response.py +31 -0
  105. llama_cloud/types/report_event_item.py +40 -0
  106. llama_cloud/types/report_event_item_event_data.py +45 -0
  107. llama_cloud/types/report_event_type.py +37 -0
  108. llama_cloud/types/report_metadata.py +43 -0
  109. llama_cloud/types/report_plan.py +36 -0
  110. llama_cloud/types/report_plan_block.py +36 -0
  111. llama_cloud/types/report_query.py +33 -0
  112. llama_cloud/types/report_response.py +41 -0
  113. llama_cloud/types/report_state.py +37 -0
  114. llama_cloud/types/report_state_event.py +38 -0
  115. llama_cloud/types/report_update_event.py +38 -0
  116. llama_cloud/types/retrieve_results.py +1 -1
  117. llama_cloud/types/retriever.py +45 -0
  118. llama_cloud/types/retriever_create.py +37 -0
  119. llama_cloud/types/retriever_pipeline.py +37 -0
  120. llama_cloud/types/status_enum.py +4 -0
  121. llama_cloud/types/supported_llm_model_names.py +4 -0
  122. llama_cloud/types/text_block.py +31 -0
  123. llama_cloud/types/text_node.py +13 -6
  124. llama_cloud/types/usage_metric_response.py +34 -0
  125. llama_cloud/types/user_job_record.py +32 -0
  126. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +3 -1
  127. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/RECORD +129 -59
  128. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +1 -1
  129. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
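
The headline changes are five new resource clients (chat_apps, jobs, llama_extract, reports, retrievers) plus the type additions walked through below. A minimal sketch of the new client surface, assuming the Fern-generated client exposes each resource module as a same-named attribute; the attribute names are taken from the module names above, and the exact call signatures are not shown in this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # token value is a placeholder

# Hypothetical attribute access, one per new resource client above:
client.chat_apps      # resources/chat_apps/client.py (new, +630)
client.llama_extract  # resources/llama_extract/client.py (new, +1038)
client.reports        # resources/reports/client.py (new, +1198)
client.retrievers     # resources/retrievers/client.py (new, +654)
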
llama_cloud/types/__init__.py

@@ -15,6 +15,7 @@ from .advanced_mode_transform_config_segmentation_config import (
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
 )
+from .app_schema_chat_chat_message import AppSchemaChatChatMessage
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
@@ -25,8 +26,9 @@ from .bedrock_embedding_config import BedrockEmbeddingConfig
 from .box_auth_mechanism import BoxAuthMechanism
 from .character_chunking_config import CharacterChunkingConfig
 from .character_splitter import CharacterSplitter
+from .chat_app import ChatApp
+from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
-from .chat_message import ChatMessage
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
@@ -48,6 +50,9 @@ from .cloud_slack_data_source import CloudSlackDataSource
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
 from .cohere_embedding_config import CohereEmbeddingConfig
+from .composite_retrieval_mode import CompositeRetrievalMode
+from .composite_retrieval_result import CompositeRetrievalResult
+from .composite_retrieved_text_node import CompositeRetrievedTextNode
 from .configurable_data_sink_names import ConfigurableDataSinkNames
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .configurable_transformation_definition import ConfigurableTransformationDefinition
@@ -66,6 +71,7 @@ from .data_source_create_component import DataSourceCreateComponent
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
+from .edit_suggestion import EditSuggestion
 from .element_segmentation_config import ElementSegmentationConfig
 from .embedding_model_config import EmbeddingModelConfig
 from .embedding_model_config_embedding_config import (
@@ -98,12 +104,25 @@ from .eval_metric import EvalMetric
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
+from .extract_agent import ExtractAgent
+from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
+from .extract_config import ExtractConfig
+from .extract_job import ExtractJob
+from .extract_job_create import ExtractJobCreate
+from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
+from .extract_mode import ExtractMode
+from .extract_resultset import ExtractResultset
+from .extract_resultset_data import ExtractResultsetData
+from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
+from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
+from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
 from .extraction_job import ExtractionJob
 from .extraction_result import ExtractionResult
 from .extraction_result_data_value import ExtractionResultDataValue
 from .extraction_schema import ExtractionSchema
 from .extraction_schema_data_schema_value import ExtractionSchemaDataSchemaValue
 from .file import File
+from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
 from .filter_operator import FilterOperator
@@ -113,10 +132,20 @@ from .http_validation_error import HttpValidationError
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
+from .image_block import ImageBlock
 from .ingestion_error_response import IngestionErrorResponse
 from .input_message import InputMessage
 from .interval_usage_and_plan import IntervalUsageAndPlan
 from .job_name_mapping import JobNameMapping
+from .job_names import JobNames
+from .job_record import JobRecord
+from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
+from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
+from .llama_index_core_base_llms_types_chat_message_blocks_item import (
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
+)
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
 from .llm import Llm
@@ -149,12 +178,16 @@ from .page_screenshot_metadata import PageScreenshotMetadata
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
 from .page_splitter_node_parser import PageSplitterNodeParser
+from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
+from .paginated_report_response import PaginatedReportResponse
+from .parse_plan_level import ParsePlanLevel
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem
 from .parsing_job import ParsingJob
 from .parsing_job_json_result import ParsingJobJsonResult
 from .parsing_job_markdown_result import ParsingJobMarkdownResult
+from .parsing_job_structured_result import ParsingJobStructuredResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_usage import ParsingUsage
 from .partition_names import PartitionNames
@@ -193,6 +226,7 @@ from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_create import PipelineFileCreate
 from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
+from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 from .pipeline_transform_config import (
     PipelineTransformConfig,
@@ -205,14 +239,40 @@ from .playground_session import PlaygroundSession
 from .pooling import Pooling
 from .preset_retrieval_params import PresetRetrievalParams
 from .presigned_url import PresignedUrl
+from .progress_event import ProgressEvent
+from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
 from .related_node_info import RelatedNodeInfo
+from .related_node_info_node_type import RelatedNodeInfoNodeType
+from .report import Report
+from .report_block import ReportBlock
+from .report_block_dependency import ReportBlockDependency
+from .report_create_response import ReportCreateResponse
+from .report_event_item import ReportEventItem
+from .report_event_item_event_data import (
+    ReportEventItemEventData,
+    ReportEventItemEventData_Progress,
+    ReportEventItemEventData_ReportBlockUpdate,
+    ReportEventItemEventData_ReportStateUpdate,
+)
+from .report_event_type import ReportEventType
+from .report_metadata import ReportMetadata
+from .report_plan import ReportPlan
+from .report_plan_block import ReportPlanBlock
+from .report_query import ReportQuery
+from .report_response import ReportResponse
+from .report_state import ReportState
+from .report_state_event import ReportStateEvent
+from .report_update_event import ReportUpdateEvent
 from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
+from .retriever import Retriever
+from .retriever_create import RetrieverCreate
+from .retriever_pipeline import RetrieverPipeline
 from .role import Role
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
@@ -220,6 +280,7 @@ from .sentence_splitter import SentenceSplitter
 from .status_enum import StatusEnum
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
+from .text_block import TextBlock
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
@@ -227,6 +288,8 @@ from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
 from .usage import Usage
+from .usage_metric_response import UsageMetricResponse
+from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
 from .user_organization_delete import UserOrganizationDelete
@@ -249,6 +312,7 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Element",
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
+    "AppSchemaChatChatMessage",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
@@ -259,8 +323,9 @@ __all__ = [
     "BoxAuthMechanism",
     "CharacterChunkingConfig",
     "CharacterSplitter",
+    "ChatApp",
+    "ChatAppResponse",
     "ChatData",
-    "ChatMessage",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -282,6 +347,9 @@ __all__ = [
     "CodeSplitter",
     "CohereEmbedding",
     "CohereEmbeddingConfig",
+    "CompositeRetrievalMode",
+    "CompositeRetrievalResult",
+    "CompositeRetrievedTextNode",
     "ConfigurableDataSinkNames",
     "ConfigurableDataSourceNames",
     "ConfigurableTransformationDefinition",
@@ -300,6 +368,7 @@ __all__ = [
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
+    "EditSuggestion",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigEmbeddingConfig",
@@ -328,12 +397,25 @@ __all__ = [
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
+    "ExtractAgent",
+    "ExtractAgentDataSchemaValue",
+    "ExtractConfig",
+    "ExtractJob",
+    "ExtractJobCreate",
+    "ExtractJobCreateDataSchemaOverrideValue",
+    "ExtractMode",
+    "ExtractResultset",
+    "ExtractResultsetData",
+    "ExtractResultsetDataItemValue",
+    "ExtractResultsetDataZeroValue",
+    "ExtractResultsetExtractionMetadataValue",
     "ExtractionJob",
     "ExtractionResult",
     "ExtractionResultDataValue",
     "ExtractionSchema",
     "ExtractionSchemaDataSchemaValue",
     "File",
+    "FilePermissionInfoValue",
     "FileResourceInfoValue",
     "FilterCondition",
     "FilterOperator",
@@ -343,10 +425,18 @@ __all__ = [
     "HuggingFaceInferenceApiEmbedding",
     "HuggingFaceInferenceApiEmbeddingConfig",
     "HuggingFaceInferenceApiEmbeddingToken",
+    "ImageBlock",
     "IngestionErrorResponse",
     "InputMessage",
     "IntervalUsageAndPlan",
     "JobNameMapping",
+    "JobNames",
+    "JobRecord",
+    "JobRecordWithUsageMetrics",
+    "LlamaIndexCoreBaseLlmsTypesChatMessage",
+    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
+    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
+    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
     "Llm",
@@ -379,12 +469,16 @@ __all__ = [
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
     "PageSplitterNodeParser",
+    "PaginatedJobsHistoryWithMetrics",
     "PaginatedListPipelineFilesResponse",
+    "PaginatedReportResponse",
+    "ParsePlanLevel",
     "ParserLanguages",
     "ParsingHistoryItem",
     "ParsingJob",
     "ParsingJobJsonResult",
     "ParsingJobMarkdownResult",
+    "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingUsage",
     "PartitionNames",
@@ -419,6 +513,7 @@ __all__ = [
     "PipelineFileCreate",
     "PipelineFileCreateCustomMetadataValue",
     "PipelineFileCustomMetadataValue",
+    "PipelineFilePermissionInfoValue",
    "PipelineFileResourceInfoValue",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
@@ -429,14 +524,38 @@ __all__ = [
     "Pooling",
     "PresetRetrievalParams",
     "PresignedUrl",
+    "ProgressEvent",
+    "ProgressEventStatus",
     "Project",
     "ProjectCreate",
     "PromptMixinPrompts",
     "PromptSpec",
     "PydanticProgramMode",
     "RelatedNodeInfo",
+    "RelatedNodeInfoNodeType",
+    "Report",
+    "ReportBlock",
+    "ReportBlockDependency",
+    "ReportCreateResponse",
+    "ReportEventItem",
+    "ReportEventItemEventData",
+    "ReportEventItemEventData_Progress",
+    "ReportEventItemEventData_ReportBlockUpdate",
+    "ReportEventItemEventData_ReportStateUpdate",
+    "ReportEventType",
+    "ReportMetadata",
+    "ReportPlan",
+    "ReportPlanBlock",
+    "ReportQuery",
+    "ReportResponse",
+    "ReportState",
+    "ReportStateEvent",
+    "ReportUpdateEvent",
     "RetrievalMode",
     "RetrieveResults",
+    "Retriever",
+    "RetrieverCreate",
+    "RetrieverPipeline",
     "Role",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
@@ -444,6 +563,7 @@ __all__ = [
     "StatusEnum",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
+    "TextBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
@@ -451,6 +571,8 @@ __all__ = [
     "TokenTextSplitter",
     "TransformationCategoryNames",
     "Usage",
+    "UsageMetricResponse",
+    "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",
llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py}

@@ -16,13 +16,13 @@ except ImportError:
     import pydantic  # type: ignore


-class ChatMessage(pydantic.BaseModel):
+class AppSchemaChatChatMessage(pydantic.BaseModel):
     id: str
     index: int = pydantic.Field(description="The index of the message in the chat.")
     annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
         description="Retrieval annotations for the message."
     )
-    role: MessageRole
+    role: MessageRole = pydantic.Field(description="The role of the message.")
     content: typing.Optional[str]
     additional_kwargs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
         description="Additional arguments passed to the model"
llama_cloud/types/chat_app.py (new)

@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .llm_parameters import LlmParameters
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ChatApp(pydantic.BaseModel):
+    """
+    Schema for a chat app
+    """
+
+    id: str
+    name: str
+    pipeline_id: str
+    project_id: str
+    llm_config: LlmParameters
+    retrieval_config: PresetRetrievalParams
+    created_at: dt.datetime
+    updated_at: dt.datetime
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
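
Every model added in this release follows the same generated pattern: a pydantic v1-style BaseModel (routed through pydantic.v1 on v2 installs) whose json()/dict() default to by_alias=True and exclude_unset=True. A standalone sketch of that behavior with an illustrative model (not part of the package):

import typing

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class Example(pydantic.BaseModel):
    id: str
    note: typing.Optional[str]  # optional, deliberately never set below

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)


print(Example(id="abc").dict())  # {'id': 'abc'} -- unset optionals are omitted
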
llama_cloud/types/chat_app_response.py (new)

@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .llm_parameters import LlmParameters
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ChatAppResponse(pydantic.BaseModel):
+    id: str
+    name: str
+    pipeline_id: str
+    project_id: str
+    llm_config: LlmParameters
+    retrieval_config: PresetRetrievalParams
+    created_at: dt.datetime
+    updated_at: dt.datetime
+    pipeline_name: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/cloud_az_storage_blob_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudAzStorageBlobDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     container_name: str = pydantic.Field(description="The name of the Azure Storage Blob container to read from.")
     account_url: str = pydantic.Field(description="The Azure Storage Blob account URL to use for authentication.")
     blob: typing.Optional[str]

llama_cloud/types/cloud_box_data_source.py

@@ -16,6 +16,7 @@ except ImportError:


 class CloudBoxDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     folder_id: typing.Optional[str]
     authentication_mechanism: BoxAuthMechanism = pydantic.Field(
         description="The type of authentication to use (Developer Token or CCG)"

llama_cloud/types/cloud_confluence_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudConfluenceDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
     authentication_mechanism: str = pydantic.Field(
         description="Type of Authentication for connecting to Confluence APIs."

llama_cloud/types/cloud_google_drive_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudGoogleDriveDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     folder_id: str = pydantic.Field(description="The ID of the Google Drive folder to read from.")
     service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
         description="The service account key JSON to use for authentication."

llama_cloud/types/cloud_jira_data_source.py

@@ -19,6 +19,7 @@ class CloudJiraDataSource(pydantic.BaseModel):
     Cloud Jira Data Source integrating JiraReader.
     """

+    supports_access_control: typing.Optional[bool]
     email: typing.Optional[str]
     api_token: typing.Optional[str]
     server_url: typing.Optional[str]

llama_cloud/types/cloud_notion_page_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudNotionPageDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     integration_token: str = pydantic.Field(description="The integration token to use for authentication.")
     database_ids: typing.Optional[str]
     page_ids: typing.Optional[str]

llama_cloud/types/cloud_one_drive_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudOneDriveDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     user_principal_name: str = pydantic.Field(description="The user principal name to use for authentication.")
     folder_path: typing.Optional[str]
     folder_id: typing.Optional[str]

llama_cloud/types/cloud_postgres_vector_store.py

@@ -25,6 +25,7 @@ class CloudPostgresVectorStore(pydantic.BaseModel):
     schema_name: str
     embed_dim: int
     hybrid_search: typing.Optional[bool]
+    perform_setup: typing.Optional[bool]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/cloud_s_3_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudS3DataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     bucket: str = pydantic.Field(description="The name of the S3 bucket to read from.")
     prefix: typing.Optional[str]
     aws_access_id: typing.Optional[str]

llama_cloud/types/cloud_sharepoint_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudSharepointDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     site_name: typing.Optional[str]
     site_id: typing.Optional[str]
     folder_path: typing.Optional[str]

llama_cloud/types/cloud_slack_data_source.py

@@ -15,6 +15,7 @@ except ImportError:


 class CloudSlackDataSource(pydantic.BaseModel):
+    supports_access_control: typing.Optional[bool]
     slack_token: str = pydantic.Field(description="Slack Bot Token.")
     channel_ids: typing.Optional[str]
     latest_date: typing.Optional[str]
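
The common thread above: nearly every cloud data source gains an optional supports_access_control flag (and CloudPostgresVectorStore gains perform_setup). Because the new fields are Optional, existing construction code keeps working; opting in is one extra keyword. A sketch, assuming no required fields beyond those visible in the hunks above:

from llama_cloud import CloudS3DataSource

source = CloudS3DataSource(
    bucket="my-bucket",            # required per the field definition above
    supports_access_control=True,  # new in 0.1.7, optional
)
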
llama_cloud/types/composite_retrieval_mode.py (new)

@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class CompositeRetrievalMode(str, enum.Enum):
+    """
+    Enum for the mode of composite retrieval.
+    """
+
+    ROUTING = "routing"
+    FULL = "full"
+
+    def visit(self, routing: typing.Callable[[], T_Result], full: typing.Callable[[], T_Result]) -> T_Result:
+        if self is CompositeRetrievalMode.ROUTING:
+            return routing()
+        if self is CompositeRetrievalMode.FULL:
+            return full()
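
The enum ships with Fern's visitor helper, which makes callers handle both variants explicitly. A usage sketch:

from llama_cloud import CompositeRetrievalMode

mode = CompositeRetrievalMode.ROUTING
strategy = mode.visit(
    routing=lambda: "send the query to the single best retriever pipeline",
    full=lambda: "fan the query out to every pipeline and merge results",
)
print(strategy)
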
llama_cloud/types/composite_retrieval_result.py (new)

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .composite_retrieved_text_node import CompositeRetrievedTextNode
+from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CompositeRetrievalResult(pydantic.BaseModel):
+    nodes: typing.Optional[typing.List[CompositeRetrievedTextNode]] = pydantic.Field(
+        description="The retrieved nodes from the composite retrieval."
+    )
+    image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
+        description="The image nodes retrieved by the pipeline for the given query."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/composite_retrieved_text_node.py (new)

@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CompositeRetrievedTextNode(pydantic.BaseModel):
+    id: str = pydantic.Field(description="The ID of the retrieved node.")
+    retriever_id: str = pydantic.Field(description="The ID of the retriever this node was retrieved from.")
+    retriever_pipeline_name: str = pydantic.Field(
+        description="The name of the retrieval pipeline this node was retrieved from."
+    )
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline this node was retrieved from.")
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="Metadata associated with the retrieved node."
+    )
+    text: str = pydantic.Field(description="The text of the retrieved node.")
+    start_char_idx: typing.Optional[int]
+    end_char_idx: typing.Optional[int]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
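
Together with CompositeRetrievalResult above, this describes the payload of the new composite retrieval flow: text nodes tagged with the retriever and pipeline they came from, plus optional page-screenshot image nodes. A consumption sketch, assuming `result` is a CompositeRetrievalResult returned by the new retrievers client (the retrieve call itself is not shown in this diff):

for node in result.nodes or []:
    print(f"[{node.retriever_pipeline_name} / pipeline {node.pipeline_id}] {node.id}")
    print(node.text[:200])
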
llama_cloud/types/data_sink.py

@@ -26,7 +26,7 @@ class DataSink(pydantic.BaseModel):
     updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the data sink.")
     sink_type: ConfigurableDataSinkNames
-    component: DataSinkComponent
+    component: DataSinkComponent = pydantic.Field(description="Component that implements the data sink")
     project_id: str

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/data_sink_create.py

@@ -23,7 +23,7 @@ class DataSinkCreate(pydantic.BaseModel):

     name: str = pydantic.Field(description="The name of the data sink.")
     sink_type: ConfigurableDataSinkNames
-    component: DataSinkCreateComponent
+    component: DataSinkCreateComponent = pydantic.Field(description="Component that implements the data sink")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/data_source.py

@@ -28,7 +28,7 @@ class DataSource(pydantic.BaseModel):
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
-    component: DataSourceComponent
+    component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
     project_id: str

     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/data_source_create.py

@@ -25,7 +25,7 @@ class DataSourceCreate(pydantic.BaseModel):
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]]
-    component: DataSourceCreateComponent
+    component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}