llama-cloud 0.1.30__py3-none-any.whl → 0.1.31__py3-none-any.whl
This diff shows the changes between the two package versions as they were published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +26 -14
- llama_cloud/client.py +0 -3
- llama_cloud/resources/__init__.py +0 -2
- llama_cloud/resources/beta/client.py +602 -0
- llama_cloud/resources/parsing/client.py +8 -0
- llama_cloud/resources/pipelines/client.py +64 -0
- llama_cloud/types/__init__.py +26 -12
- llama_cloud/types/{model_configuration.py → agent_data.py} +8 -7
- llama_cloud/types/agent_deployment_summary.py +1 -1
- llama_cloud/types/{message.py → aggregate_group.py} +8 -9
- llama_cloud/types/base_plan.py +3 -0
- llama_cloud/types/filter_operation.py +46 -0
- llama_cloud/types/filter_operation_eq.py +6 -0
- llama_cloud/types/filter_operation_gt.py +6 -0
- llama_cloud/types/filter_operation_gte.py +6 -0
- llama_cloud/types/filter_operation_includes_item.py +6 -0
- llama_cloud/types/filter_operation_lt.py +6 -0
- llama_cloud/types/filter_operation_lte.py +6 -0
- llama_cloud/types/input_message.py +2 -2
- llama_cloud/types/legacy_parse_job_config.py +3 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/{llama_index_core_base_llms_types_message_role.py → message_role.py} +9 -9
- llama_cloud/types/{text_content_block.py → paginated_response_agent_data.py} +5 -5
- llama_cloud/types/paginated_response_aggregate_group.py +34 -0
- llama_cloud/types/parse_job_config.py +1 -0
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/role.py +0 -1
- llama_cloud/types/{app_schema_chat_chat_message.py → src_app_schema_chat_chat_message.py} +3 -3
- llama_cloud/types/user_organization_role.py +0 -1
- {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.31.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.31.dist-info}/RECORD +34 -29
- llama_cloud/resources/responses/__init__.py +0 -2
- llama_cloud/resources/responses/client.py +0 -137
- llama_cloud/types/app_schema_responses_message_role.py +0 -33
- {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.31.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.31.dist-info}/WHEEL +0 -0
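
The most visible change in 0.1.31 is a reshuffle of the package's public type exports and the removal of the responses resource. As a rough, non-authoritative illustration of the import-level impact (the names below are taken from the __init__.py diff that follows; the field shapes of these models are not visible in this diff, so no instances are constructed):

# Newly exported from llama_cloud in 0.1.31, per the __init__.py diff below.
from llama_cloud import (
    AgentData,
    AggregateGroup,
    FilterOperation,
    MessageRole,
    PaginatedResponseAgentData,
    PaginatedResponseAggregateGroup,
    SrcAppSchemaChatChatMessage,
)

# Exported in 0.1.30 but dropped from __all__ in 0.1.31; these imports now fail:
# from llama_cloud import Message, ModelConfiguration, TextContentBlock
# from llama_cloud import AppSchemaResponsesMessageRole, LlamaIndexCoreBaseLlmsTypesMessageRole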
llama_cloud/__init__.py
CHANGED
@@ -12,10 +12,10 @@ from .types import (
     AdvancedModeTransformConfigSegmentationConfig_Element,
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
+    AgentData,
     AgentDeploymentList,
     AgentDeploymentSummary,
-
-    AppSchemaResponsesMessageRole,
+    AggregateGroup,
     AudioBlock,
     AutoTransformConfig,
     AzureOpenAiEmbedding,
@@ -135,6 +135,13 @@ from .types import (
     FilePermissionInfoValue,
     FileResourceInfoValue,
     FilterCondition,
+    FilterOperation,
+    FilterOperationEq,
+    FilterOperationGt,
+    FilterOperationGte,
+    FilterOperationIncludesItem,
+    FilterOperationLt,
+    FilterOperationLte,
     FilterOperator,
     FreeCreditsUsage,
     GeminiEmbedding,
@@ -170,7 +177,6 @@ from .types import (
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
-    LlamaIndexCoreBaseLlmsTypesMessageRole,
     LlamaParseParameters,
     LlamaParseParametersPriority,
     LlamaParseSupportedFileExtensions,
@@ -179,13 +185,12 @@ from .types import (
     LoadFilesJobConfig,
     ManagedIngestionStatus,
     ManagedIngestionStatusResponse,
-    Message,
     MessageAnnotation,
+    MessageRole,
     MetadataFilter,
     MetadataFilterValue,
     MetadataFilters,
     MetadataFiltersFiltersItem,
-    ModelConfiguration,
     NodeRelationship,
     NoneChunkingConfig,
     NoneSegmentationConfig,
@@ -204,6 +209,8 @@ from .types import (
     PaginatedListCloudDocumentsResponse,
     PaginatedListPipelineFilesResponse,
     PaginatedReportResponse,
+    PaginatedResponseAgentData,
+    PaginatedResponseAggregateGroup,
     ParseJobConfig,
     ParseJobConfigPriority,
     ParsePlanLevel,
@@ -307,13 +314,13 @@ from .types import (
     SchemaRelaxMode,
     SemanticChunkingConfig,
     SentenceChunkingConfig,
+    SrcAppSchemaChatChatMessage,
     StatusEnum,
     StructMode,
     StructParseConf,
     SupportedLlmModel,
     SupportedLlmModelNames,
     TextBlock,
-    TextContentBlock,
     TextNode,
     TextNodeRelationshipsValue,
     TextNodeWithScore,
@@ -387,7 +394,6 @@ from .resources import (
     pipelines,
     projects,
     reports,
-    responses,
     retrievers,
 )
 from .environment import LlamaCloudEnvironment
@@ -404,10 +410,10 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Element",
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
+    "AgentData",
     "AgentDeploymentList",
     "AgentDeploymentSummary",
-    "
-    "AppSchemaResponsesMessageRole",
+    "AggregateGroup",
     "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
@@ -549,6 +555,13 @@ __all__ = [
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
     "FilterCondition",
+    "FilterOperation",
+    "FilterOperationEq",
+    "FilterOperationGt",
+    "FilterOperationGte",
+    "FilterOperationIncludesItem",
+    "FilterOperationLt",
+    "FilterOperationLte",
     "FilterOperator",
     "FreeCreditsUsage",
     "GeminiEmbedding",
@@ -585,7 +598,6 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
-    "LlamaIndexCoreBaseLlmsTypesMessageRole",
     "LlamaParseParameters",
     "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
@@ -594,13 +606,12 @@ __all__ = [
     "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
-    "Message",
     "MessageAnnotation",
+    "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "ModelConfiguration",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -619,6 +630,8 @@ __all__ = [
     "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
+    "PaginatedResponseAgentData",
+    "PaginatedResponseAggregateGroup",
     "ParseJobConfig",
     "ParseJobConfigPriority",
     "ParsePlanLevel",
@@ -733,13 +746,13 @@ __all__ = [
     "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
+    "SrcAppSchemaChatChatMessage",
     "StatusEnum",
     "StructMode",
     "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
-    "TextContentBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
@@ -778,6 +791,5 @@ __all__ = [
     "pipelines",
     "projects",
     "reports",
-    "responses",
     "retrievers",
 ]
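
A practical consequence of the rename llama_index_core_base_llms_types_message_role.py → message_role.py documented above is that the old export name disappears on upgrade. A minimal compatibility sketch, assuming the enum's members stay compatible across the two versions (this diff only shows the rename, not the members):

# Prefer the new export name, fall back to the old one so the code runs on 0.1.30 and 0.1.31.
try:
    from llama_cloud import MessageRole  # 0.1.31 and later
except ImportError:
    from llama_cloud import LlamaIndexCoreBaseLlmsTypesMessageRole as MessageRole  # 0.1.30
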
llama_cloud/client.py
CHANGED
@@ -22,7 +22,6 @@ from .resources.parsing.client import AsyncParsingClient, ParsingClient
 from .resources.pipelines.client import AsyncPipelinesClient, PipelinesClient
 from .resources.projects.client import AsyncProjectsClient, ProjectsClient
 from .resources.reports.client import AsyncReportsClient, ReportsClient
-from .resources.responses.client import AsyncResponsesClient, ResponsesClient
 from .resources.retrievers.client import AsyncRetrieversClient, RetrieversClient


@@ -48,7 +47,6 @@ class LlamaCloud:
         self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
         self.files = FilesClient(client_wrapper=self._client_wrapper)
         self.pipelines = PipelinesClient(client_wrapper=self._client_wrapper)
-        self.responses = ResponsesClient(client_wrapper=self._client_wrapper)
         self.retrievers = RetrieversClient(client_wrapper=self._client_wrapper)
         self.jobs = JobsClient(client_wrapper=self._client_wrapper)
         self.evals = EvalsClient(client_wrapper=self._client_wrapper)
@@ -83,7 +81,6 @@ class AsyncLlamaCloud:
         self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
         self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
         self.pipelines = AsyncPipelinesClient(client_wrapper=self._client_wrapper)
-        self.responses = AsyncResponsesClient(client_wrapper=self._client_wrapper)
         self.retrievers = AsyncRetrieversClient(client_wrapper=self._client_wrapper)
         self.jobs = AsyncJobsClient(client_wrapper=self._client_wrapper)
         self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
llama_cloud/resources/__init__.py
CHANGED
@@ -17,7 +17,6 @@ from . import (
     pipelines,
     projects,
     reports,
-    responses,
     retrievers,
 )
 from .data_sinks import DataSinkUpdateComponent
@@ -109,6 +108,5 @@ __all__ = [
     "pipelines",
     "projects",
     "reports",
-    "responses",
     "retrievers",
 ]
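
For callers, the net effect of the client.py and resources/__init__.py changes above is that the responses resource is gone from both the sync and async clients. A minimal sketch of what breaks, with the constructor argument assumed rather than taken from this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="...")  # the token argument is an assumption; it is not shown in this diff

client.pipelines    # still present in 0.1.31 (see the client.py diff above)
client.retrievers   # still present
# client.responses  # removed in 0.1.31 and now raises AttributeError;
#                   # ResponsesClient and llama_cloud/resources/responses/ were deleted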