llama-cloud 0.1.25__py3-none-any.whl → 0.1.27__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (37)
  1. llama_cloud/__init__.py +28 -2
  2. llama_cloud/client.py +6 -0
  3. llama_cloud/resources/__init__.py +4 -0
  4. llama_cloud/resources/beta/client.py +14 -2
  5. llama_cloud/resources/llama_apps/__init__.py +2 -0
  6. llama_cloud/resources/llama_apps/client.py +160 -0
  7. llama_cloud/resources/llama_extract/client.py +129 -0
  8. llama_cloud/resources/parsing/client.py +8 -0
  9. llama_cloud/resources/responses/__init__.py +2 -0
  10. llama_cloud/resources/responses/client.py +137 -0
  11. llama_cloud/types/__init__.py +24 -2
  12. llama_cloud/types/agent_deployment_list.py +32 -0
  13. llama_cloud/types/agent_deployment_summary.py +38 -0
  14. llama_cloud/types/app_schema_chat_chat_message.py +2 -2
  15. llama_cloud/types/app_schema_responses_message_role.py +33 -0
  16. llama_cloud/types/cloud_google_drive_data_source.py +1 -3
  17. llama_cloud/types/extract_config.py +2 -0
  18. llama_cloud/types/extract_config_priority.py +29 -0
  19. llama_cloud/types/extract_models.py +8 -0
  20. llama_cloud/types/extract_schema_generate_response.py +38 -0
  21. llama_cloud/types/extract_schema_generate_response_data_schema_value.py +7 -0
  22. llama_cloud/types/input_message.py +2 -2
  23. llama_cloud/types/legacy_parse_job_config.py +1 -0
  24. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
  25. llama_cloud/types/{message_role.py → llama_index_core_base_llms_types_message_role.py} +9 -9
  26. llama_cloud/types/llama_parse_parameters.py +3 -0
  27. llama_cloud/types/llama_parse_parameters_priority.py +29 -0
  28. llama_cloud/types/message.py +38 -0
  29. llama_cloud/types/metadata_filter.py +1 -1
  30. llama_cloud/types/model_configuration.py +39 -0
  31. llama_cloud/types/parse_job_config.py +3 -0
  32. llama_cloud/types/parse_job_config_priority.py +29 -0
  33. llama_cloud/types/text_content_block.py +34 -0
  34. {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/METADATA +3 -2
  35. {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/RECORD +37 -22
  36. {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/WHEEL +1 -1
  37. {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/LICENSE +0 -0
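
At a glance, 0.1.27 adds two client resources (`llama_apps` and `responses`), a priority setting for parsing and extraction jobs, schema-generation response types for LlamaExtract, and renames the `MessageRole` type. A minimal sketch of the new priority knob, based on the `create_batch` docstring further down; only the `LOW` member is confirmed anywhere in this diff, any other members are assumed:

from llama_cloud import LlamaParseParameters, LlamaParseParametersPriority

# New in 0.1.27: LlamaParseParameters grows an optional priority field.
# LOW is the only enum member visible in this diff; others are assumed to exist.
params = LlamaParseParameters(priority=LlamaParseParametersPriority.LOW)
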
llama_cloud/__init__.py CHANGED
@@ -12,7 +12,10 @@ from .types import (
     AdvancedModeTransformConfigSegmentationConfig_Element,
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
+    AgentDeploymentList,
+    AgentDeploymentSummary,
     AppSchemaChatChatMessage,
+    AppSchemaResponsesMessageRole,
     AudioBlock,
     AutoTransformConfig,
     AzureOpenAiEmbedding,
@@ -101,6 +104,7 @@ from .types import (
     ExtractAgent,
     ExtractAgentDataSchemaValue,
     ExtractConfig,
+    ExtractConfigPriority,
     ExtractJob,
     ExtractJobCreate,
     ExtractJobCreateDataSchemaOverride,
@@ -118,6 +122,8 @@ from .types import (
     ExtractRunDataSchemaValue,
     ExtractRunDataZeroValue,
     ExtractRunExtractionMetadataValue,
+    ExtractSchemaGenerateResponse,
+    ExtractSchemaGenerateResponseDataSchemaValue,
     ExtractSchemaValidateResponse,
     ExtractSchemaValidateResponseDataSchemaValue,
     ExtractState,
@@ -164,19 +170,22 @@ from .types import (
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
+    LlamaIndexCoreBaseLlmsTypesMessageRole,
     LlamaParseParameters,
+    LlamaParseParametersPriority,
     LlamaParseSupportedFileExtensions,
     LlmModelData,
     LlmParameters,
     LoadFilesJobConfig,
     ManagedIngestionStatus,
     ManagedIngestionStatusResponse,
+    Message,
     MessageAnnotation,
-    MessageRole,
     MetadataFilter,
     MetadataFilterValue,
     MetadataFilters,
     MetadataFiltersFiltersItem,
+    ModelConfiguration,
     NodeRelationship,
     NoneChunkingConfig,
     NoneSegmentationConfig,
@@ -196,6 +205,7 @@ from .types import (
     PaginatedListPipelineFilesResponse,
     PaginatedReportResponse,
     ParseJobConfig,
+    ParseJobConfigPriority,
     ParsePlanLevel,
     ParserLanguages,
     ParsingHistoryItem,
@@ -303,6 +313,7 @@ from .types import (
     SupportedLlmModel,
     SupportedLlmModelNames,
     TextBlock,
+    TextContentBlock,
     TextNode,
     TextNodeRelationshipsValue,
     TextNodeWithScore,
@@ -366,12 +377,14 @@ from .resources import (
     evals,
     files,
     jobs,
+    llama_apps,
     llama_extract,
     organizations,
     parsing,
     pipelines,
     projects,
     reports,
+    responses,
     retrievers,
 )
 from .environment import LlamaCloudEnvironment
@@ -388,7 +401,10 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Element",
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
+    "AgentDeploymentList",
+    "AgentDeploymentSummary",
     "AppSchemaChatChatMessage",
+    "AppSchemaResponsesMessageRole",
     "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
@@ -492,6 +508,7 @@ __all__ = [
     "ExtractAgentUpdateDataSchema",
     "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractConfig",
+    "ExtractConfigPriority",
     "ExtractJob",
     "ExtractJobCreate",
     "ExtractJobCreateBatchDataSchemaOverride",
@@ -511,6 +528,8 @@ __all__ = [
     "ExtractRunDataSchemaValue",
     "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaGenerateResponse",
+    "ExtractSchemaGenerateResponseDataSchemaValue",
     "ExtractSchemaValidateRequestDataSchema",
     "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractSchemaValidateResponse",
@@ -563,19 +582,22 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
+    "LlamaIndexCoreBaseLlmsTypesMessageRole",
     "LlamaParseParameters",
+    "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
     "LlmModelData",
     "LlmParameters",
     "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
+    "Message",
     "MessageAnnotation",
-    "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
+    "ModelConfiguration",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -595,6 +617,7 @@ __all__ = [
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
     "ParseJobConfig",
+    "ParseJobConfigPriority",
     "ParsePlanLevel",
     "ParserLanguages",
     "ParsingHistoryItem",
@@ -713,6 +736,7 @@ __all__ = [
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
+    "TextContentBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
@@ -741,11 +765,13 @@ __all__ = [
     "evals",
     "files",
     "jobs",
+    "llama_apps",
     "llama_extract",
     "organizations",
     "parsing",
     "pipelines",
     "projects",
     "reports",
+    "responses",
     "retrievers",
 ]
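
The only removal above is `MessageRole`: per entry 25 in the file list, `types/message_role.py` was renamed to `llama_index_core_base_llms_types_message_role.py`, and the export is now `LlamaIndexCoreBaseLlmsTypesMessageRole`. A hedged migration sketch; the enum members (e.g. `USER`) are assumed to survive the rename unchanged:

# 0.1.25:
#   from llama_cloud import MessageRole
# 0.1.27 (renamed export; members such as USER are assumed unchanged):
from llama_cloud import LlamaIndexCoreBaseLlmsTypesMessageRole

role = LlamaIndexCoreBaseLlmsTypesMessageRole.USER
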
llama_cloud/client.py CHANGED
@@ -14,12 +14,14 @@ from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigs
 from .resources.evals.client import AsyncEvalsClient, EvalsClient
 from .resources.files.client import AsyncFilesClient, FilesClient
 from .resources.jobs.client import AsyncJobsClient, JobsClient
+from .resources.llama_apps.client import AsyncLlamaAppsClient, LlamaAppsClient
 from .resources.llama_extract.client import AsyncLlamaExtractClient, LlamaExtractClient
 from .resources.organizations.client import AsyncOrganizationsClient, OrganizationsClient
 from .resources.parsing.client import AsyncParsingClient, ParsingClient
 from .resources.pipelines.client import AsyncPipelinesClient, PipelinesClient
 from .resources.projects.client import AsyncProjectsClient, ProjectsClient
 from .resources.reports.client import AsyncReportsClient, ReportsClient
+from .resources.responses.client import AsyncResponsesClient, ResponsesClient
 from .resources.retrievers.client import AsyncRetrieversClient, RetrieversClient
 
 
@@ -45,11 +47,13 @@ class LlamaCloud:
         self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
         self.files = FilesClient(client_wrapper=self._client_wrapper)
         self.pipelines = PipelinesClient(client_wrapper=self._client_wrapper)
+        self.responses = ResponsesClient(client_wrapper=self._client_wrapper)
         self.retrievers = RetrieversClient(client_wrapper=self._client_wrapper)
         self.jobs = JobsClient(client_wrapper=self._client_wrapper)
         self.evals = EvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
+        self.llama_apps = LlamaAppsClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
         self.beta = BetaClient(client_wrapper=self._client_wrapper)
@@ -77,11 +81,13 @@ class AsyncLlamaCloud:
         self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
         self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
         self.pipelines = AsyncPipelinesClient(client_wrapper=self._client_wrapper)
+        self.responses = AsyncResponsesClient(client_wrapper=self._client_wrapper)
         self.retrievers = AsyncRetrieversClient(client_wrapper=self._client_wrapper)
         self.jobs = AsyncJobsClient(client_wrapper=self._client_wrapper)
         self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
+        self.llama_apps = AsyncLlamaAppsClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
         self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
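
Both the sync and async clients now expose the two new resources as attributes. A minimal usage sketch: `list_deployments` comes from the docstrings of the new `llama_apps` client further down, while the `responses` surface is not shown in this diff, so only the attribute itself is referenced:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
# New in 0.1.27; both attributes are wired up in __init__ above.
deployments = client.llama_apps.list_deployments(
    project_id="string",
)
responses_client = client.responses  # methods not shown in this diff
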
llama_cloud/resources/__init__.py CHANGED
@@ -9,12 +9,14 @@ from . import (
     evals,
     files,
     jobs,
+    llama_apps,
     llama_extract,
     organizations,
     parsing,
     pipelines,
     projects,
     reports,
+    responses,
     retrievers,
 )
 from .data_sinks import DataSinkUpdateComponent
@@ -98,11 +100,13 @@ __all__ = [
     "evals",
     "files",
     "jobs",
+    "llama_apps",
     "llama_extract",
     "organizations",
     "parsing",
     "pipelines",
     "projects",
     "reports",
+    "responses",
     "retrievers",
 ]
llama_cloud/resources/beta/client.py CHANGED
@@ -114,7 +114,12 @@ class BetaClient:
 
             - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
         ---
-        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud import (
+            FailPageMode,
+            LlamaParseParameters,
+            LlamaParseParametersPriority,
+            ParsingMode,
+        )
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
@@ -123,6 +128,7 @@ class BetaClient:
         client.beta.create_batch(
             tool="string",
             tool_data=LlamaParseParameters(
+                priority=LlamaParseParametersPriority.LOW,
                 parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
                 replace_failed_page_mode=FailPageMode.RAW_TEXT,
             ),
@@ -286,7 +292,12 @@ class AsyncBetaClient:
 
             - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
         ---
-        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud import (
+            FailPageMode,
+            LlamaParseParameters,
+            LlamaParseParametersPriority,
+            ParsingMode,
+        )
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
@@ -295,6 +306,7 @@ class AsyncBetaClient:
         await client.beta.create_batch(
             tool="string",
             tool_data=LlamaParseParameters(
+                priority=LlamaParseParametersPriority.LOW,
                 parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
                 replace_failed_page_mode=FailPageMode.RAW_TEXT,
             ),
llama_cloud/resources/llama_apps/__init__.py ADDED
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
llama_cloud/resources/llama_apps/client.py ADDED
@@ -0,0 +1,160 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.agent_deployment_list import AgentDeploymentList
+from ...types.http_validation_error import HttpValidationError
+
+try:
+    import pydantic
+
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LlamaAppsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list_deployments(self, project_id: str) -> AgentDeploymentList:
+        """
+        List all deployments for a project.
+
+        Parameters:
+            - project_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_apps.list_deployments(
+            project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/agents"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(AgentDeploymentList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def sync_deployments(self, project_id: str) -> AgentDeploymentList:
+        """
+        Sync deployments for a project.
+
+        Parameters:
+            - project_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_apps.sync_deployments(
+            project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/agents:sync"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(AgentDeploymentList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncLlamaAppsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list_deployments(self, project_id: str) -> AgentDeploymentList:
+        """
+        List all deployments for a project.
+
+        Parameters:
+            - project_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_apps.list_deployments(
+            project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/agents"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(AgentDeploymentList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def sync_deployments(self, project_id: str) -> AgentDeploymentList:
+        """
+        Sync deployments for a project.
+
+        Parameters:
+            - project_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_apps.sync_deployments(
+            project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/agents:sync"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(AgentDeploymentList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
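
For completeness, a runnable sketch of the async variant, assembled from the docstrings above. The `asyncio` scaffolding is an assumption (the SDK only documents the bare `await` calls), and the printed result is an `AgentDeploymentList` whose fields live in `agent_deployment_list.py`, which this diff lists but does not display:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(
        token="YOUR_TOKEN",
    )
    # Trigger a sync, then list what is deployed for the project.
    await client.llama_apps.sync_deployments(
        project_id="string",
    )
    deployments = await client.llama_apps.list_deployments(
        project_id="string",
    )
    print(deployments)


asyncio.run(main())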