llama-cloud 0.1.21__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +16 -12
- llama_cloud/client.py +3 -3
- llama_cloud/resources/__init__.py +2 -2
- llama_cloud/resources/admin/client.py +78 -0
- llama_cloud/resources/jobs/client.py +10 -2
- llama_cloud/resources/llama_extract/client.py +50 -6
- llama_cloud/resources/organizations/client.py +12 -2
- llama_cloud/resources/parsing/client.py +30 -0
- llama_cloud/resources/pipelines/client.py +8 -0
- llama_cloud/resources/retrievers/client.py +14 -0
- llama_cloud/types/__init__.py +14 -10
- llama_cloud/types/cloud_s_3_data_source.py +1 -0
- llama_cloud/types/{data_sink_definition.py → document_block.py} +6 -15
- llama_cloud/types/document_chunk_mode.py +17 -0
- llama_cloud/types/extract_config.py +4 -0
- llama_cloud/types/extract_mode.py +4 -0
- llama_cloud/types/extract_models.py +33 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +11 -0
- llama_cloud/types/{data_source_definition.py → llm_config_result.py} +6 -15
- llama_cloud/types/llm_config_result_llm_type.py +33 -0
- llama_cloud/types/llm_configs_response.py +33 -0
- llama_cloud/types/pipeline_create.py +1 -3
- llama_cloud/types/struct_parse_conf.py +2 -1
- llama_cloud/types/supported_llm_model_names.py +4 -4
- llama_cloud/types/user_organization_role.py +1 -0
- {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/RECORD +30 -29
- {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/WHEEL +1 -1
- llama_cloud/resources/component_definitions/client.py +0 -189
- llama_cloud/types/configurable_transformation_definition.py +0 -48
- llama_cloud/types/configurable_transformation_names.py +0 -41
- llama_cloud/types/transformation_category_names.py +0 -17
- /llama_cloud/resources/{component_definitions → admin}/__init__.py +0 -0
- {llama_cloud-0.1.21.dist-info → llama_cloud-0.1.22.dist-info}/LICENSE +0 -0
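The headline changes: the component_definitions resource is removed (together with the ConfigurableTransformationDefinition, ConfigurableTransformationNames, TransformationCategoryNames, DataSinkDefinition, and DataSourceDefinition types), and a new admin resource is added. A minimal upgrade check, sketched under the assumption that only the attribute names visible in the per-file diffs below changed; this is not an official migration guide:

    # Sketch of a 0.1.21 -> 0.1.22 upgrade probe. Only attribute names
    # visible in the diffs below are assumed to exist.
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Removed in 0.1.22: the component_definitions resource is gone.
    assert not hasattr(client, "component_definitions")

    # Added in 0.1.22: the admin resource and its single endpoint.
    llm_configs = client.admin.get_llm_configs()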
llama_cloud/__init__.py
CHANGED
@@ -60,23 +60,21 @@ from .types import (
     CompositeRetrievedTextNodeWithScore,
     ConfigurableDataSinkNames,
     ConfigurableDataSourceNames,
-    ConfigurableTransformationDefinition,
-    ConfigurableTransformationNames,
     CreditType,
     DataSink,
     DataSinkComponent,
     DataSinkCreate,
     DataSinkCreateComponent,
-    DataSinkDefinition,
     DataSource,
     DataSourceComponent,
     DataSourceCreate,
     DataSourceCreateComponent,
     DataSourceCreateCustomMetadataValue,
     DataSourceCustomMetadataValue,
-    DataSourceDefinition,
     DataSourceUpdateDispatcherConfig,
     DeleteParams,
+    DocumentBlock,
+    DocumentChunkMode,
     DocumentIngestionJobParams,
     EditSuggestion,
     EditSuggestionBlocksItem,
@@ -108,6 +106,7 @@ from .types import (
     ExtractJobCreateDataSchemaOverride,
     ExtractJobCreateDataSchemaOverrideZeroValue,
     ExtractMode,
+    ExtractModels,
     ExtractResultset,
     ExtractResultsetData,
     ExtractResultsetDataItemValue,
@@ -161,10 +160,14 @@ from .types import (
     LlamaIndexCoreBaseLlmsTypesChatMessage,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
     LlamaParseParameters,
     LlamaParseSupportedFileExtensions,
+    LlmConfigResult,
+    LlmConfigResultLlmType,
+    LlmConfigsResponse,
     LlmModelData,
     LlmParameters,
     LoadFilesJobConfig,
@@ -304,7 +307,6 @@ from .types import (
     TextNodeRelationshipsValue,
     TextNodeWithScore,
     TokenChunkingConfig,
-    TransformationCategoryNames,
     UsageAndPlan,
     UsageMetricResponse,
     UsageResponse,
@@ -355,9 +357,9 @@ from .resources import (
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
     PipelineUpdateTransformConfig,
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
+    admin,
     beta,
     chat_apps,
-    component_definitions,
     data_sinks,
     data_sources,
     embedding_model_configs,
@@ -434,14 +436,11 @@ __all__ = [
     "CompositeRetrievedTextNodeWithScore",
     "ConfigurableDataSinkNames",
     "ConfigurableDataSourceNames",
-    "ConfigurableTransformationDefinition",
-    "ConfigurableTransformationNames",
     "CreditType",
     "DataSink",
     "DataSinkComponent",
     "DataSinkCreate",
     "DataSinkCreateComponent",
-    "DataSinkDefinition",
     "DataSinkUpdateComponent",
     "DataSource",
     "DataSourceComponent",
@@ -449,11 +448,12 @@ __all__ = [
     "DataSourceCreateComponent",
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
-    "DataSourceDefinition",
     "DataSourceUpdateComponent",
     "DataSourceUpdateCustomMetadataValue",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
+    "DocumentBlock",
+    "DocumentChunkMode",
     "DocumentIngestionJobParams",
     "EditSuggestion",
     "EditSuggestionBlocksItem",
@@ -499,6 +499,7 @@ __all__ = [
     "ExtractJobCreateDataSchemaOverride",
     "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
+    "ExtractModels",
     "ExtractResultset",
     "ExtractResultsetData",
     "ExtractResultsetDataItemValue",
@@ -558,10 +559,14 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
+    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
+    "LlmConfigResult",
+    "LlmConfigResultLlmType",
+    "LlmConfigsResponse",
     "LlmModelData",
     "LlmParameters",
     "LoadFilesJobConfig",
@@ -711,7 +716,6 @@ __all__ = [
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
     "TokenChunkingConfig",
-    "TransformationCategoryNames",
     "UnprocessableEntityError",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
     "UsageAndPlan",
@@ -728,9 +732,9 @@ __all__ = [
     "VertexAiEmbeddingConfig",
     "VertexEmbeddingMode",
     "VertexTextEmbedding",
+    "admin",
     "beta",
     "chat_apps",
-    "component_definitions",
     "data_sinks",
     "data_sources",
     "embedding_model_configs",
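Aside from the removals, every new name lands in the package root, so a quick smoke test of the additions listed above is just an import:

    # New top-level exports in 0.1.22, taken verbatim from the __all__
    # additions above; importing them is a cheap install sanity check.
    from llama_cloud import (
        DocumentBlock,
        DocumentChunkMode,
        ExtractModels,
        LlmConfigResult,
        LlmConfigResultLlmType,
        LlmConfigsResponse,
    )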
llama_cloud/client.py
CHANGED
@@ -6,9 +6,9 @@ import httpx

 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
+from .resources.admin.client import AdminClient, AsyncAdminClient
 from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
-from .resources.component_definitions.client import AsyncComponentDefinitionsClient, ComponentDefinitionsClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
 from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
 from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigsClient, EmbeddingModelConfigsClient
@@ -50,8 +50,8 @@ class LlamaCloud:
         self.jobs = JobsClient(client_wrapper=self._client_wrapper)
         self.evals = EvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
-        self.component_definitions = ComponentDefinitionsClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
+        self.admin = AdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
         self.beta = BetaClient(client_wrapper=self._client_wrapper)
@@ -83,8 +83,8 @@ class AsyncLlamaCloud:
         self.jobs = AsyncJobsClient(client_wrapper=self._client_wrapper)
         self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
-        self.component_definitions = AsyncComponentDefinitionsClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
+        self.admin = AsyncAdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
         self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
llama_cloud/resources/__init__.py
CHANGED
@@ -1,9 +1,9 @@
 # This file was auto-generated by Fern from our API Definition.

 from . import (
+    admin,
     beta,
     chat_apps,
-    component_definitions,
     data_sinks,
     data_sources,
     embedding_model_configs,
@@ -89,9 +89,9 @@ __all__ = [
     "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
     "PipelineUpdateTransformConfig",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
+    "admin",
     "beta",
     "chat_apps",
-    "component_definitions",
     "data_sinks",
     "data_sources",
     "embedding_model_configs",
llama_cloud/resources/admin/client.py
ADDED
@@ -0,0 +1,78 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.llm_configs_response import LlmConfigsResponse
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class AdminClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_llm_configs(self) -> LlmConfigsResponse:
+        """
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.admin.get_llm_configs()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llm-configs"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LlmConfigsResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAdminClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_llm_configs(self) -> LlmConfigsResponse:
+        """
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.admin.get_llm_configs()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llm-configs"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LlmConfigsResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
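The generated docstrings above already show the call shape; pulled out of the diff into a runnable sketch (the token value and the asyncio plumbing are the only additions):

    # Sync and async calls to the new GET /api/v1/admin/llm-configs
    # endpoint, following the generated docstring examples above.
    import asyncio

    from llama_cloud.client import AsyncLlamaCloud, LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    llm_configs = client.admin.get_llm_configs()  # -> LlmConfigsResponse

    async def main() -> None:
        async_client = AsyncLlamaCloud(token="YOUR_TOKEN")
        await async_client.admin.get_llm_configs()

    asyncio.run(main())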
llama_cloud/resources/jobs/client.py
CHANGED
@@ -37,6 +37,10 @@ class JobsClient:
         """
         Get jobs for a project.

+        Note:
+            The include_usage_metrics parameter is deprecated and will be removed in a future version.
+            We've moved to usage v2 and this parameter will no longer return meaningful data.
+
         Parameters:
         - job_name: typing.Optional[str].

@@ -44,7 +48,7 @@ class JobsClient:

         - offset: typing.Optional[int].

-        - include_usage_metrics: typing.Optional[bool].
+        - include_usage_metrics: typing.Optional[bool]. Deprecated: This parameter is no longer supported as we've moved to usage v2. It will be removed in a future version.

         - project_id: typing.Optional[str].

@@ -101,6 +105,10 @@ class AsyncJobsClient:
         """
         Get jobs for a project.

+        Note:
+            The include_usage_metrics parameter is deprecated and will be removed in a future version.
+            We've moved to usage v2 and this parameter will no longer return meaningful data.
+
         Parameters:
         - job_name: typing.Optional[str].

@@ -108,7 +116,7 @@ class AsyncJobsClient:

         - offset: typing.Optional[int].

-        - include_usage_metrics: typing.Optional[bool].
+        - include_usage_metrics: typing.Optional[bool]. Deprecated: This parameter is no longer supported as we've moved to usage v2. It will be removed in a future version.

         - project_id: typing.Optional[str].

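In practice the deprecation just means dropping the flag from call sites. A hedged sketch, with get_jobs standing in as a placeholder name since the method's definition sits above the hunks shown:

    # `get_jobs` is a placeholder for the jobs listing method (its real
    # name is outside the hunks above). The deprecated
    # include_usage_metrics flag is simply no longer passed.
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    jobs = client.jobs.get_jobs(  # placeholder name, see note above
        job_name="parsing",
        project_id="YOUR_PROJECT_ID",
    )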
llama_cloud/resources/llama_extract/client.py
CHANGED
@@ -92,7 +92,12 @@ class LlamaExtractClient:

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -103,6 +108,7 @@ class LlamaExtractClient:
             config=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
@@ -238,7 +244,12 @@ class LlamaExtractClient:

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -249,6 +260,7 @@ class LlamaExtractClient:
             config=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
@@ -342,6 +354,7 @@ class LlamaExtractClient:
         - request: ExtractJobCreate.
         ---
         from llama_cloud import (
+            DocumentChunkMode,
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
@@ -359,6 +372,7 @@ class LlamaExtractClient:
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
+                    chunk_mode=DocumentChunkMode.PAGE,
                 ),
             ),
         )
@@ -428,6 +442,7 @@ class LlamaExtractClient:
         ---
         from llama_cloud import (
             ChunkMode,
+            DocumentChunkMode,
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
@@ -449,6 +464,7 @@ class LlamaExtractClient:
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
+                    chunk_mode=DocumentChunkMode.PAGE,
                 ),
             ),
             extract_settings=LlamaExtractSettings(
@@ -548,7 +564,12 @@ class LlamaExtractClient:

         - config_override: typing.Optional[ExtractConfig].
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -560,6 +581,7 @@ class LlamaExtractClient:
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
@@ -831,7 +853,12 @@ class AsyncLlamaExtractClient:

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -842,6 +869,7 @@ class AsyncLlamaExtractClient:
             config=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
@@ -977,7 +1005,12 @@ class AsyncLlamaExtractClient:

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -988,6 +1021,7 @@ class AsyncLlamaExtractClient:
             config=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
@@ -1081,6 +1115,7 @@ class AsyncLlamaExtractClient:
         - request: ExtractJobCreate.
         ---
         from llama_cloud import (
+            DocumentChunkMode,
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
@@ -1098,6 +1133,7 @@ class AsyncLlamaExtractClient:
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
+                    chunk_mode=DocumentChunkMode.PAGE,
                 ),
             ),
         )
@@ -1167,6 +1203,7 @@ class AsyncLlamaExtractClient:
         ---
         from llama_cloud import (
             ChunkMode,
+            DocumentChunkMode,
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
@@ -1188,6 +1225,7 @@ class AsyncLlamaExtractClient:
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
+                    chunk_mode=DocumentChunkMode.PAGE,
                 ),
             ),
             extract_settings=LlamaExtractSettings(
@@ -1287,7 +1325,12 @@ class AsyncLlamaExtractClient:

         - config_override: typing.Optional[ExtractConfig].
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            DocumentChunkMode,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -1299,6 +1342,7 @@ class AsyncLlamaExtractClient:
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
+                chunk_mode=DocumentChunkMode.PAGE,
             ),
         )
         """
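The common thread in these hunks is the new chunk_mode field on ExtractConfig, backed by the DocumentChunkMode enum added in this release. Lifted out of the docstring examples above into a standalone snippet:

    # ExtractConfig gains chunk_mode in 0.1.22; values come from the new
    # DocumentChunkMode enum (PAGE is the variant used in the docstrings).
    from llama_cloud import (
        DocumentChunkMode,
        ExtractConfig,
        ExtractMode,
        ExtractTarget,
    )

    config = ExtractConfig(
        extraction_target=ExtractTarget.PER_DOC,
        extraction_mode=ExtractMode.FAST,
        chunk_mode=DocumentChunkMode.PAGE,
    )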
llama_cloud/resources/organizations/client.py
CHANGED
@@ -520,12 +520,16 @@ class OrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_user_role(self, organization_id: str) -> typing.Optional[UserOrganizationRole]:
+    def get_user_role(
+        self, organization_id: str, *, project_id: typing.Optional[str] = None
+    ) -> typing.Optional[UserOrganizationRole]:
         """
         Get the role of a user in an organization.

         Parameters:
         - organization_id: str.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -541,6 +545,7 @@ class OrganizationsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
             ),
+            params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1216,12 +1221,16 @@ class AsyncOrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_user_role(self, organization_id: str) -> typing.Optional[UserOrganizationRole]:
+    async def get_user_role(
+        self, organization_id: str, *, project_id: typing.Optional[str] = None
+    ) -> typing.Optional[UserOrganizationRole]:
         """
         Get the role of a user in an organization.

         Parameters:
         - organization_id: str.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1237,6 +1246,7 @@ class AsyncOrganizationsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
             ),
+            params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
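get_user_role now takes an optional keyword-only project_id, forwarded as a query parameter via the params= line added above, and the result is Optional. Usage per the new signature:

    # The new keyword-only project_id is forwarded as a query parameter;
    # the result may be None, hence the explicit check.
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    role = client.organizations.get_user_role(
        "YOUR_ORGANIZATION_ID",
        project_id="YOUR_PROJECT_ID",  # optional; omit for the org-wide role
    )
    if role is not None:
        print(role)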
llama_cloud/resources/parsing/client.py
CHANGED
@@ -667,6 +667,9 @@ class ParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -738,6 +741,9 @@ class ParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -843,6 +849,9 @@ class ParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -914,6 +923,9 @@ class ParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -983,6 +995,9 @@ class ParsingClient:
         """
         Get parsing history for user

+        This endpoint is deprecated.
+        Use /api/v1/jobs/?job_name=parsing&project_id=YOUR_PROJECT_ID instead.
+
         ---
         from llama_cloud.client import LlamaCloud

@@ -1680,6 +1695,9 @@ class AsyncParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -1751,6 +1769,9 @@ class AsyncParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -1858,6 +1879,9 @@ class AsyncParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -1931,6 +1955,9 @@ class AsyncParsingClient:
         """
         Get a job by id

+        Note: The 'credits_used' and 'job_credits_usage' fields in the response metadata are deprecated
+        and will be removed in a future release.
+
         Parameters:
         - job_id: str.

@@ -2000,6 +2027,9 @@ class AsyncParsingClient:
         """
         Get parsing history for user

+        This endpoint is deprecated.
+        Use /api/v1/jobs/?job_name=parsing&project_id=YOUR_PROJECT_ID instead.
+
         ---
         from llama_cloud.client import AsyncLlamaCloud

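For the deprecated history endpoint, the docstring points at the generic jobs listing. A raw httpx sketch of that replacement call; the path and query parameters are verbatim from the note above, while the base URL and bearer-auth header are assumptions about the hosted service:

    # Replacement for the deprecated parsing-history endpoint, hitting
    # the documented /api/v1/jobs/ route directly. Base URL and bearer
    # auth are assumptions, not taken from this diff.
    import httpx

    response = httpx.get(
        "https://api.cloud.llamaindex.ai/api/v1/jobs/",
        params={"job_name": "parsing", "project_id": "YOUR_PROJECT_ID"},
        headers={"Authorization": "Bearer YOUR_TOKEN"},
    )
    response.raise_for_status()
    parsing_jobs = response.json()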
llama_cloud/resources/pipelines/client.py
CHANGED
@@ -517,6 +517,7 @@ class PipelinesClient:
         only_manually_uploaded: typing.Optional[bool] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
+        order_by: typing.Optional[str] = None,
     ) -> PaginatedListPipelineFilesResponse:
         """
         Get files for a pipeline.

@@ -531,6 +532,8 @@ class PipelinesClient:
         - limit: typing.Optional[int].

         - offset: typing.Optional[int].
+
+        - order_by: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -550,6 +553,7 @@ class PipelinesClient:
                     "only_manually_uploaded": only_manually_uploaded,
                     "limit": limit,
                     "offset": offset,
+                    "order_by": order_by,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
@@ -2131,6 +2135,7 @@ class AsyncPipelinesClient:
         only_manually_uploaded: typing.Optional[bool] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
+        order_by: typing.Optional[str] = None,
     ) -> PaginatedListPipelineFilesResponse:
         """
         Get files for a pipeline.

@@ -2145,6 +2150,8 @@ class AsyncPipelinesClient:
         - limit: typing.Optional[int].

         - offset: typing.Optional[int].
+
+        - order_by: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -2164,6 +2171,7 @@ class AsyncPipelinesClient:
                     "only_manually_uploaded": only_manually_uploaded,
                     "limit": limit,
                     "offset": offset,
+                    "order_by": order_by,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
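The only pipelines change is a pass-through order_by query parameter on the file listing. A sketch; the listing method's name sits above the hunks shown (the placeholder list_pipeline_files is used here), and the accepted sort keys are not part of this diff:

    # `list_pipeline_files` is a placeholder name and "created_at" an
    # illustrative sort key; only the order_by parameter itself (plus
    # limit and offset) is attested by the diff above.
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    files_page = client.pipelines.list_pipeline_files(  # placeholder name
        "YOUR_PIPELINE_ID",
        limit=25,
        offset=0,
        order_by="created_at",
    )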