llama-cloud 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud has been flagged by the registry. See the registry's advisory page for details.
- llama_cloud/__init__.py +140 -6
- llama_cloud/client.py +15 -0
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +15 -0
- llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
- llama_cloud/resources/chat_apps/client.py +630 -0
- llama_cloud/resources/data_sinks/client.py +12 -12
- llama_cloud/resources/data_sources/client.py +14 -14
- llama_cloud/resources/embedding_model_configs/client.py +20 -76
- llama_cloud/resources/evals/client.py +26 -36
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/client.py +40 -44
- llama_cloud/resources/jobs/__init__.py +2 -0
- llama_cloud/resources/jobs/client.py +148 -0
- llama_cloud/resources/llama_extract/__init__.py +5 -0
- llama_cloud/resources/llama_extract/client.py +1038 -0
- llama_cloud/resources/llama_extract/types/__init__.py +6 -0
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
- llama_cloud/resources/organizations/client.py +66 -70
- llama_cloud/resources/parsing/client.py +448 -428
- llama_cloud/resources/pipelines/client.py +256 -344
- llama_cloud/resources/projects/client.py +34 -60
- llama_cloud/resources/reports/__init__.py +5 -0
- llama_cloud/resources/reports/client.py +1198 -0
- llama_cloud/resources/reports/types/__init__.py +7 -0
- llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
- llama_cloud/resources/retrievers/__init__.py +2 -0
- llama_cloud/resources/retrievers/client.py +654 -0
- llama_cloud/types/__init__.py +128 -6
- llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
- llama_cloud/types/azure_open_ai_embedding.py +6 -12
- llama_cloud/types/base_prompt_template.py +2 -6
- llama_cloud/types/bedrock_embedding.py +6 -12
- llama_cloud/types/character_splitter.py +2 -4
- llama_cloud/types/chat_app.py +44 -0
- llama_cloud/types/chat_app_response.py +41 -0
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
- llama_cloud/types/cloud_box_data_source.py +6 -12
- llama_cloud/types/cloud_confluence_data_source.py +6 -6
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -2
- llama_cloud/types/cloud_one_drive_data_source.py +3 -5
- llama_cloud/types/cloud_postgres_vector_store.py +1 -0
- llama_cloud/types/cloud_s_3_data_source.py +4 -8
- llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
- llama_cloud/types/cloud_slack_data_source.py +6 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +3 -7
- llama_cloud/types/composite_retrieval_mode.py +21 -0
- llama_cloud/types/composite_retrieval_result.py +38 -0
- llama_cloud/types/composite_retrieved_text_node.py +42 -0
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_component.py +20 -0
- llama_cloud/types/data_source.py +5 -7
- llama_cloud/types/data_source_component.py +28 -0
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/edit_suggestion.py +39 -0
- llama_cloud/types/embedding_model_config.py +2 -2
- llama_cloud/types/embedding_model_config_update.py +2 -4
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +8 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extract_agent.py +45 -0
- llama_cloud/types/extract_agent_data_schema_value.py +5 -0
- llama_cloud/types/extract_config.py +40 -0
- llama_cloud/types/extract_job.py +35 -0
- llama_cloud/types/extract_job_create.py +40 -0
- llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
- llama_cloud/types/extract_mode.py +17 -0
- llama_cloud/types/extract_resultset.py +46 -0
- llama_cloud/types/extract_resultset_data.py +11 -0
- llama_cloud/types/extract_resultset_data_item_value.py +7 -0
- llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +9 -14
- llama_cloud/types/filter_condition.py +9 -1
- llama_cloud/types/filter_operator.py +6 -2
- llama_cloud/types/gemini_embedding.py +6 -10
- llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
- llama_cloud/types/image_block.py +35 -0
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/job_names.py +89 -0
- llama_cloud/types/job_record.py +57 -0
- llama_cloud/types/job_record_with_usage_metrics.py +36 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
- llama_cloud/types/llama_parse_parameters.py +4 -0
- llama_cloud/types/llm.py +3 -4
- llama_cloud/types/llm_model_data.py +1 -0
- llama_cloud/types/llm_parameters.py +3 -5
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status.py +4 -0
- llama_cloud/types/managed_ingestion_status_response.py +4 -5
- llama_cloud/types/markdown_element_node_parser.py +3 -5
- llama_cloud/types/markdown_node_parser.py +1 -1
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metadata_filter_value.py +5 -0
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/object_type.py +4 -0
- llama_cloud/types/open_ai_embedding.py +6 -12
- llama_cloud/types/organization.py +7 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
- llama_cloud/types/paginated_report_response.py +35 -0
- llama_cloud/types/parse_plan_level.py +21 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +8 -16
- llama_cloud/types/pipeline_data_source.py +7 -13
- llama_cloud/types/pipeline_data_source_component.py +28 -0
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +13 -24
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +4 -4
- llama_cloud/types/preset_retrieval_params.py +8 -14
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/progress_event.py +44 -0
- llama_cloud/types/progress_event_status.py +33 -0
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +3 -5
- llama_cloud/types/related_node_info.py +2 -2
- llama_cloud/types/related_node_info_node_type.py +7 -0
- llama_cloud/types/report.py +33 -0
- llama_cloud/types/report_block.py +34 -0
- llama_cloud/types/report_block_dependency.py +29 -0
- llama_cloud/types/report_create_response.py +31 -0
- llama_cloud/types/report_event_item.py +40 -0
- llama_cloud/types/report_event_item_event_data.py +45 -0
- llama_cloud/types/report_event_type.py +37 -0
- llama_cloud/types/report_metadata.py +43 -0
- llama_cloud/types/report_plan.py +36 -0
- llama_cloud/types/report_plan_block.py +36 -0
- llama_cloud/types/report_query.py +33 -0
- llama_cloud/types/report_response.py +41 -0
- llama_cloud/types/report_state.py +37 -0
- llama_cloud/types/report_state_event.py +38 -0
- llama_cloud/types/report_update_event.py +38 -0
- llama_cloud/types/retrieve_results.py +1 -1
- llama_cloud/types/retriever.py +45 -0
- llama_cloud/types/retriever_create.py +37 -0
- llama_cloud/types/retriever_pipeline.py +37 -0
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +2 -4
- llama_cloud/types/status_enum.py +4 -0
- llama_cloud/types/supported_llm_model_names.py +4 -0
- llama_cloud/types/text_block.py +31 -0
- llama_cloud/types/text_node.py +15 -8
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/usage_metric_response.py +34 -0
- llama_cloud/types/user_job_record.py +32 -0
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/vertex_text_embedding.py +5 -9
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +2 -1
- llama_cloud-0.1.7.dist-info/RECORD +310 -0
- llama_cloud/types/value.py +0 -5
- llama_cloud-0.1.6.dist-info/RECORD +0 -241
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1038 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import typing
|
|
4
|
+
import urllib.parse
|
|
5
|
+
from json.decoder import JSONDecodeError
|
|
6
|
+
|
|
7
|
+
from ...core.api_error import ApiError
|
|
8
|
+
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
|
|
9
|
+
from ...core.jsonable_encoder import jsonable_encoder
|
|
10
|
+
from ...core.remove_none_from_dict import remove_none_from_dict
|
|
11
|
+
from ...errors.unprocessable_entity_error import UnprocessableEntityError
|
|
12
|
+
from ...types.extract_agent import ExtractAgent
|
|
13
|
+
from ...types.extract_config import ExtractConfig
|
|
14
|
+
from ...types.extract_job import ExtractJob
|
|
15
|
+
from ...types.extract_job_create import ExtractJobCreate
|
|
16
|
+
from ...types.extract_resultset import ExtractResultset
|
|
17
|
+
from ...types.http_validation_error import HttpValidationError
|
|
18
|
+
from .types.extract_agent_create_data_schema_value import ExtractAgentCreateDataSchemaValue
|
|
19
|
+
from .types.extract_agent_update_data_schema_value import ExtractAgentUpdateDataSchemaValue
|
|
20
|
+
|
|
21
|
+
try:
|
|
22
|
+
import pydantic
|
|
23
|
+
if pydantic.__version__.startswith("1."):
|
|
24
|
+
raise ImportError
|
|
25
|
+
import pydantic.v1 as pydantic # type: ignore
|
|
26
|
+
except ImportError:
|
|
27
|
+
import pydantic # type: ignore
|
|
28
|
+
|
|
29
|
+
# this is used as the default value for optional parameters
|
|
30
|
+
OMIT = typing.cast(typing.Any, ...)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class LlamaExtractClient:
|
|
34
|
+
def __init__(self, *, client_wrapper: SyncClientWrapper):
|
|
35
|
+
self._client_wrapper = client_wrapper
|
|
36
|
+
|
|
37
|
+
def create_extraction_agent(
|
|
38
|
+
self,
|
|
39
|
+
*,
|
|
40
|
+
project_id: typing.Optional[str] = None,
|
|
41
|
+
organization_id: typing.Optional[str] = None,
|
|
42
|
+
name: str,
|
|
43
|
+
data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
|
|
44
|
+
config: ExtractConfig,
|
|
45
|
+
) -> ExtractAgent:
|
|
46
|
+
"""
|
|
47
|
+
Parameters:
|
|
48
|
+
- project_id: typing.Optional[str].
|
|
49
|
+
|
|
50
|
+
- organization_id: typing.Optional[str].
|
|
51
|
+
|
|
52
|
+
- name: str. The name of the extraction schema
|
|
53
|
+
|
|
54
|
+
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
|
|
55
|
+
|
|
56
|
+
- config: ExtractConfig. The configuration parameters for the extraction agent.
|
|
57
|
+
---
|
|
58
|
+
from llama_cloud import ExtractConfig, ExtractMode
|
|
59
|
+
from llama_cloud.client import LlamaCloud
|
|
60
|
+
|
|
61
|
+
client = LlamaCloud(
|
|
62
|
+
token="YOUR_TOKEN",
|
|
63
|
+
)
|
|
64
|
+
client.llama_extract.create_extraction_agent(
|
|
65
|
+
name="string",
|
|
66
|
+
data_schema={},
|
|
67
|
+
config=ExtractConfig(
|
|
68
|
+
extraction_mode=ExtractMode.PER_DOC,
|
|
69
|
+
),
|
|
70
|
+
)
|
|
71
|
+
"""
|
|
72
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
73
|
+
"POST",
|
|
74
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
|
|
75
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
76
|
+
json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
|
|
77
|
+
headers=self._client_wrapper.get_headers(),
|
|
78
|
+
timeout=60,
|
|
79
|
+
)
|
|
80
|
+
if 200 <= _response.status_code < 300:
|
|
81
|
+
return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
|
|
82
|
+
if _response.status_code == 422:
|
|
83
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
84
|
+
try:
|
|
85
|
+
_response_json = _response.json()
|
|
86
|
+
except JSONDecodeError:
|
|
87
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
88
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
89
|
+
|
|
90
|
+
def list_extraction_agents(
|
|
91
|
+
self, *, project_id: str, organization_id: typing.Optional[str] = None
|
|
92
|
+
) -> typing.List[ExtractAgent]:
|
|
93
|
+
"""
|
|
94
|
+
Parameters:
|
|
95
|
+
- project_id: str.
|
|
96
|
+
|
|
97
|
+
- organization_id: typing.Optional[str].
|
|
98
|
+
---
|
|
99
|
+
from llama_cloud.client import LlamaCloud
|
|
100
|
+
|
|
101
|
+
client = LlamaCloud(
|
|
102
|
+
token="YOUR_TOKEN",
|
|
103
|
+
)
|
|
104
|
+
client.llama_extract.list_extraction_agents(
|
|
105
|
+
project_id="string",
|
|
106
|
+
)
|
|
107
|
+
"""
|
|
108
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
109
|
+
"GET",
|
|
110
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
|
|
111
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
112
|
+
headers=self._client_wrapper.get_headers(),
|
|
113
|
+
timeout=60,
|
|
114
|
+
)
|
|
115
|
+
if 200 <= _response.status_code < 300:
|
|
116
|
+
return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
|
|
117
|
+
if _response.status_code == 422:
|
|
118
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
119
|
+
try:
|
|
120
|
+
_response_json = _response.json()
|
|
121
|
+
except JSONDecodeError:
|
|
122
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
123
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
124
|
+
|
|
125
|
+
def get_extraction_agent(
|
|
126
|
+
self,
|
|
127
|
+
extraction_agent_id: str,
|
|
128
|
+
*,
|
|
129
|
+
project_id: typing.Optional[str] = None,
|
|
130
|
+
organization_id: typing.Optional[str] = None,
|
|
131
|
+
) -> ExtractAgent:
|
|
132
|
+
"""
|
|
133
|
+
Parameters:
|
|
134
|
+
- extraction_agent_id: str.
|
|
135
|
+
|
|
136
|
+
- project_id: typing.Optional[str].
|
|
137
|
+
|
|
138
|
+
- organization_id: typing.Optional[str].
|
|
139
|
+
---
|
|
140
|
+
from llama_cloud.client import LlamaCloud
|
|
141
|
+
|
|
142
|
+
client = LlamaCloud(
|
|
143
|
+
token="YOUR_TOKEN",
|
|
144
|
+
)
|
|
145
|
+
client.llama_extract.get_extraction_agent(
|
|
146
|
+
extraction_agent_id="string",
|
|
147
|
+
)
|
|
148
|
+
"""
|
|
149
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
150
|
+
"GET",
|
|
151
|
+
urllib.parse.urljoin(
|
|
152
|
+
f"{self._client_wrapper.get_base_url()}/",
|
|
153
|
+
f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
|
|
154
|
+
),
|
|
155
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
156
|
+
headers=self._client_wrapper.get_headers(),
|
|
157
|
+
timeout=60,
|
|
158
|
+
)
|
|
159
|
+
if 200 <= _response.status_code < 300:
|
|
160
|
+
return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
|
|
161
|
+
if _response.status_code == 422:
|
|
162
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
163
|
+
try:
|
|
164
|
+
_response_json = _response.json()
|
|
165
|
+
except JSONDecodeError:
|
|
166
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
167
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
168
|
+
|
|
169
|
+
def update_extraction_agent(
|
|
170
|
+
self,
|
|
171
|
+
extraction_agent_id: str,
|
|
172
|
+
*,
|
|
173
|
+
project_id: typing.Optional[str] = None,
|
|
174
|
+
organization_id: typing.Optional[str] = None,
|
|
175
|
+
data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
|
|
176
|
+
config: ExtractConfig,
|
|
177
|
+
) -> ExtractAgent:
|
|
178
|
+
"""
|
|
179
|
+
Parameters:
|
|
180
|
+
- extraction_agent_id: str.
|
|
181
|
+
|
|
182
|
+
- project_id: typing.Optional[str].
|
|
183
|
+
|
|
184
|
+
- organization_id: typing.Optional[str].
|
|
185
|
+
|
|
186
|
+
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
|
|
187
|
+
|
|
188
|
+
- config: ExtractConfig. The configuration parameters for the extraction agent.
|
|
189
|
+
---
|
|
190
|
+
from llama_cloud import ExtractConfig, ExtractMode
|
|
191
|
+
from llama_cloud.client import LlamaCloud
|
|
192
|
+
|
|
193
|
+
client = LlamaCloud(
|
|
194
|
+
token="YOUR_TOKEN",
|
|
195
|
+
)
|
|
196
|
+
client.llama_extract.update_extraction_agent(
|
|
197
|
+
extraction_agent_id="string",
|
|
198
|
+
data_schema={},
|
|
199
|
+
config=ExtractConfig(
|
|
200
|
+
extraction_mode=ExtractMode.PER_DOC,
|
|
201
|
+
),
|
|
202
|
+
)
|
|
203
|
+
"""
|
|
204
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
205
|
+
"PUT",
|
|
206
|
+
urllib.parse.urljoin(
|
|
207
|
+
f"{self._client_wrapper.get_base_url()}/",
|
|
208
|
+
f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
|
|
209
|
+
),
|
|
210
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
211
|
+
json=jsonable_encoder({"data_schema": data_schema, "config": config}),
|
|
212
|
+
headers=self._client_wrapper.get_headers(),
|
|
213
|
+
timeout=60,
|
|
214
|
+
)
|
|
215
|
+
if 200 <= _response.status_code < 300:
|
|
216
|
+
return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
|
|
217
|
+
if _response.status_code == 422:
|
|
218
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
219
|
+
try:
|
|
220
|
+
_response_json = _response.json()
|
|
221
|
+
except JSONDecodeError:
|
|
222
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
223
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
224
|
+
|
|
225
|
+
def delete_extraction_agent(
|
|
226
|
+
self,
|
|
227
|
+
extraction_agent_id: str,
|
|
228
|
+
*,
|
|
229
|
+
project_id: typing.Optional[str] = None,
|
|
230
|
+
organization_id: typing.Optional[str] = None,
|
|
231
|
+
) -> typing.Any:
|
|
232
|
+
"""
|
|
233
|
+
Parameters:
|
|
234
|
+
- extraction_agent_id: str.
|
|
235
|
+
|
|
236
|
+
- project_id: typing.Optional[str].
|
|
237
|
+
|
|
238
|
+
- organization_id: typing.Optional[str].
|
|
239
|
+
---
|
|
240
|
+
from llama_cloud.client import LlamaCloud
|
|
241
|
+
|
|
242
|
+
client = LlamaCloud(
|
|
243
|
+
token="YOUR_TOKEN",
|
|
244
|
+
)
|
|
245
|
+
client.llama_extract.delete_extraction_agent(
|
|
246
|
+
extraction_agent_id="string",
|
|
247
|
+
)
|
|
248
|
+
"""
|
|
249
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
250
|
+
"DELETE",
|
|
251
|
+
urllib.parse.urljoin(
|
|
252
|
+
f"{self._client_wrapper.get_base_url()}/",
|
|
253
|
+
f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
|
|
254
|
+
),
|
|
255
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
256
|
+
headers=self._client_wrapper.get_headers(),
|
|
257
|
+
timeout=60,
|
|
258
|
+
)
|
|
259
|
+
if 200 <= _response.status_code < 300:
|
|
260
|
+
return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
|
|
261
|
+
if _response.status_code == 422:
|
|
262
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
263
|
+
try:
|
|
264
|
+
_response_json = _response.json()
|
|
265
|
+
except JSONDecodeError:
|
|
266
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
267
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
268
|
+
|
|
269
|
+
def list_jobs(
|
|
270
|
+
self,
|
|
271
|
+
*,
|
|
272
|
+
extraction_agent_id: str,
|
|
273
|
+
project_id: typing.Optional[str] = None,
|
|
274
|
+
organization_id: typing.Optional[str] = None,
|
|
275
|
+
) -> typing.List[ExtractJob]:
|
|
276
|
+
"""
|
|
277
|
+
Parameters:
|
|
278
|
+
- extraction_agent_id: str.
|
|
279
|
+
|
|
280
|
+
- project_id: typing.Optional[str].
|
|
281
|
+
|
|
282
|
+
- organization_id: typing.Optional[str].
|
|
283
|
+
---
|
|
284
|
+
from llama_cloud.client import LlamaCloud
|
|
285
|
+
|
|
286
|
+
client = LlamaCloud(
|
|
287
|
+
token="YOUR_TOKEN",
|
|
288
|
+
)
|
|
289
|
+
client.llama_extract.list_jobs(
|
|
290
|
+
extraction_agent_id="string",
|
|
291
|
+
)
|
|
292
|
+
"""
|
|
293
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
294
|
+
"GET",
|
|
295
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
|
|
296
|
+
params=remove_none_from_dict(
|
|
297
|
+
{
|
|
298
|
+
"extraction_agent_id": extraction_agent_id,
|
|
299
|
+
"project_id": project_id,
|
|
300
|
+
"organization_id": organization_id,
|
|
301
|
+
}
|
|
302
|
+
),
|
|
303
|
+
headers=self._client_wrapper.get_headers(),
|
|
304
|
+
timeout=60,
|
|
305
|
+
)
|
|
306
|
+
if 200 <= _response.status_code < 300:
|
|
307
|
+
return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json()) # type: ignore
|
|
308
|
+
if _response.status_code == 422:
|
|
309
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
310
|
+
try:
|
|
311
|
+
_response_json = _response.json()
|
|
312
|
+
except JSONDecodeError:
|
|
313
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
314
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
315
|
+
|
|
316
|
+
def run_job(
|
|
317
|
+
self,
|
|
318
|
+
*,
|
|
319
|
+
project_id: typing.Optional[str] = None,
|
|
320
|
+
organization_id: typing.Optional[str] = None,
|
|
321
|
+
request: ExtractJobCreate,
|
|
322
|
+
) -> ExtractJob:
|
|
323
|
+
"""
|
|
324
|
+
Parameters:
|
|
325
|
+
- project_id: typing.Optional[str].
|
|
326
|
+
|
|
327
|
+
- organization_id: typing.Optional[str].
|
|
328
|
+
|
|
329
|
+
- request: ExtractJobCreate.
|
|
330
|
+
---
|
|
331
|
+
from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
|
|
332
|
+
from llama_cloud.client import LlamaCloud
|
|
333
|
+
|
|
334
|
+
client = LlamaCloud(
|
|
335
|
+
token="YOUR_TOKEN",
|
|
336
|
+
)
|
|
337
|
+
client.llama_extract.run_job(
|
|
338
|
+
request=ExtractJobCreate(
|
|
339
|
+
extraction_agent_id="string",
|
|
340
|
+
file_id="string",
|
|
341
|
+
config_override=ExtractConfig(
|
|
342
|
+
extraction_mode=ExtractMode.PER_DOC,
|
|
343
|
+
),
|
|
344
|
+
),
|
|
345
|
+
)
|
|
346
|
+
"""
|
|
347
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
348
|
+
"POST",
|
|
349
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
|
|
350
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
351
|
+
json=jsonable_encoder(request),
|
|
352
|
+
headers=self._client_wrapper.get_headers(),
|
|
353
|
+
timeout=60,
|
|
354
|
+
)
|
|
355
|
+
if 200 <= _response.status_code < 300:
|
|
356
|
+
return pydantic.parse_obj_as(ExtractJob, _response.json()) # type: ignore
|
|
357
|
+
if _response.status_code == 422:
|
|
358
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
359
|
+
try:
|
|
360
|
+
_response_json = _response.json()
|
|
361
|
+
except JSONDecodeError:
|
|
362
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
363
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
364
|
+
|
|
365
|
+
def get_job(
|
|
366
|
+
self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
367
|
+
) -> ExtractJob:
|
|
368
|
+
"""
|
|
369
|
+
Parameters:
|
|
370
|
+
- job_id: str.
|
|
371
|
+
|
|
372
|
+
- project_id: typing.Optional[str].
|
|
373
|
+
|
|
374
|
+
- organization_id: typing.Optional[str].
|
|
375
|
+
---
|
|
376
|
+
from llama_cloud.client import LlamaCloud
|
|
377
|
+
|
|
378
|
+
client = LlamaCloud(
|
|
379
|
+
token="YOUR_TOKEN",
|
|
380
|
+
)
|
|
381
|
+
client.llama_extract.get_job(
|
|
382
|
+
job_id="string",
|
|
383
|
+
)
|
|
384
|
+
"""
|
|
385
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
386
|
+
"GET",
|
|
387
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
|
|
388
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
389
|
+
headers=self._client_wrapper.get_headers(),
|
|
390
|
+
timeout=60,
|
|
391
|
+
)
|
|
392
|
+
if 200 <= _response.status_code < 300:
|
|
393
|
+
return pydantic.parse_obj_as(ExtractJob, _response.json()) # type: ignore
|
|
394
|
+
if _response.status_code == 422:
|
|
395
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
396
|
+
try:
|
|
397
|
+
_response_json = _response.json()
|
|
398
|
+
except JSONDecodeError:
|
|
399
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
400
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
401
|
+
|
|
402
|
+
def run_job_with_parsed_file(
|
|
403
|
+
self,
|
|
404
|
+
*,
|
|
405
|
+
project_id: typing.Optional[str] = None,
|
|
406
|
+
organization_id: typing.Optional[str] = None,
|
|
407
|
+
request: ExtractJobCreate,
|
|
408
|
+
) -> typing.Optional[ExtractResultset]:
|
|
409
|
+
"""
|
|
410
|
+
Parameters:
|
|
411
|
+
- project_id: typing.Optional[str].
|
|
412
|
+
|
|
413
|
+
- organization_id: typing.Optional[str].
|
|
414
|
+
|
|
415
|
+
- request: ExtractJobCreate.
|
|
416
|
+
---
|
|
417
|
+
from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
|
|
418
|
+
from llama_cloud.client import LlamaCloud
|
|
419
|
+
|
|
420
|
+
client = LlamaCloud(
|
|
421
|
+
token="YOUR_TOKEN",
|
|
422
|
+
)
|
|
423
|
+
client.llama_extract.run_job_with_parsed_file(
|
|
424
|
+
request=ExtractJobCreate(
|
|
425
|
+
extraction_agent_id="string",
|
|
426
|
+
file_id="string",
|
|
427
|
+
config_override=ExtractConfig(
|
|
428
|
+
extraction_mode=ExtractMode.PER_DOC,
|
|
429
|
+
),
|
|
430
|
+
),
|
|
431
|
+
)
|
|
432
|
+
"""
|
|
433
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
434
|
+
"POST",
|
|
435
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
|
|
436
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
437
|
+
json=jsonable_encoder(request),
|
|
438
|
+
headers=self._client_wrapper.get_headers(),
|
|
439
|
+
timeout=60,
|
|
440
|
+
)
|
|
441
|
+
if 200 <= _response.status_code < 300:
|
|
442
|
+
return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
|
|
443
|
+
if _response.status_code == 422:
|
|
444
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
445
|
+
try:
|
|
446
|
+
_response_json = _response.json()
|
|
447
|
+
except JSONDecodeError:
|
|
448
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
449
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
450
|
+
|
|
451
|
+
def run_jobs_in_batch(
|
|
452
|
+
self,
|
|
453
|
+
*,
|
|
454
|
+
project_id: typing.Optional[str] = None,
|
|
455
|
+
organization_id: typing.Optional[str] = None,
|
|
456
|
+
extraction_agent_id: str,
|
|
457
|
+
file_ids: typing.List[str],
|
|
458
|
+
) -> typing.List[ExtractJob]:
|
|
459
|
+
"""
|
|
460
|
+
Parameters:
|
|
461
|
+
- project_id: typing.Optional[str].
|
|
462
|
+
|
|
463
|
+
- organization_id: typing.Optional[str].
|
|
464
|
+
|
|
465
|
+
- extraction_agent_id: str. The id of the extraction agent
|
|
466
|
+
|
|
467
|
+
- file_ids: typing.List[str]. The ids of the files
|
|
468
|
+
---
|
|
469
|
+
from llama_cloud.client import LlamaCloud
|
|
470
|
+
|
|
471
|
+
client = LlamaCloud(
|
|
472
|
+
token="YOUR_TOKEN",
|
|
473
|
+
)
|
|
474
|
+
client.llama_extract.run_jobs_in_batch(
|
|
475
|
+
extraction_agent_id="string",
|
|
476
|
+
file_ids=[],
|
|
477
|
+
)
|
|
478
|
+
"""
|
|
479
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
480
|
+
"POST",
|
|
481
|
+
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
|
|
482
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
483
|
+
json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
|
|
484
|
+
headers=self._client_wrapper.get_headers(),
|
|
485
|
+
timeout=60,
|
|
486
|
+
)
|
|
487
|
+
if 200 <= _response.status_code < 300:
|
|
488
|
+
return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json()) # type: ignore
|
|
489
|
+
if _response.status_code == 422:
|
|
490
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
491
|
+
try:
|
|
492
|
+
_response_json = _response.json()
|
|
493
|
+
except JSONDecodeError:
|
|
494
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
495
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
496
|
+
|
|
497
|
+
def get_job_result(
|
|
498
|
+
self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
499
|
+
) -> ExtractResultset:
|
|
500
|
+
"""
|
|
501
|
+
Parameters:
|
|
502
|
+
- job_id: str.
|
|
503
|
+
|
|
504
|
+
- project_id: typing.Optional[str].
|
|
505
|
+
|
|
506
|
+
- organization_id: typing.Optional[str].
|
|
507
|
+
---
|
|
508
|
+
from llama_cloud.client import LlamaCloud
|
|
509
|
+
|
|
510
|
+
client = LlamaCloud(
|
|
511
|
+
token="YOUR_TOKEN",
|
|
512
|
+
)
|
|
513
|
+
client.llama_extract.get_job_result(
|
|
514
|
+
job_id="string",
|
|
515
|
+
)
|
|
516
|
+
"""
|
|
517
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
518
|
+
"GET",
|
|
519
|
+
urllib.parse.urljoin(
|
|
520
|
+
f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
|
|
521
|
+
),
|
|
522
|
+
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
523
|
+
headers=self._client_wrapper.get_headers(),
|
|
524
|
+
timeout=60,
|
|
525
|
+
)
|
|
526
|
+
if 200 <= _response.status_code < 300:
|
|
527
|
+
return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
|
|
528
|
+
if _response.status_code == 422:
|
|
529
|
+
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
530
|
+
try:
|
|
531
|
+
_response_json = _response.json()
|
|
532
|
+
except JSONDecodeError:
|
|
533
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
534
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
535
|
+
|
|
536
|
+
|
|
537
|
+
class AsyncLlamaExtractClient:
    """Asynchronous client for the LlamaExtract (extractionv2) endpoints.

    Mirrors the synchronous client's API; all calls await the wrapped
    httpx client supplied by the client wrapper.
    """

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # The wrapper supplies the base URL, auth headers, and shared httpx client.
        self._client_wrapper = client_wrapper
|
|
540
|
+
|
|
541
|
+
async def create_extraction_agent(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    name: str,
    data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
    config: ExtractConfig,
) -> ExtractAgent:
    """Create a new extraction agent.

    Args:
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.
        name: The name of the extraction schema.
        data_schema: The schema of the data.
        config: The configuration parameters for the extraction agent.

    Returns:
        The created ExtractAgent.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent")
    # Drop None-valued query params; serialize the body via the shared encoder.
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _body = jsonable_encoder({"name": name, "data_schema": data_schema, "config": config})
    _res = await self._client_wrapper.httpx_client.request(
        "POST", _url, params=_query, json=_body, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractAgent, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
593
|
+
|
|
594
|
+
async def list_extraction_agents(
    self, *, project_id: str, organization_id: typing.Optional[str] = None
) -> typing.List[ExtractAgent]:
    """List all extraction agents in a project.

    Args:
        project_id: The project whose agents are listed (required).
        organization_id: Optional organization scope for the request.

    Returns:
        A list of ExtractAgent objects.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents")
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "GET", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(typing.List[ExtractAgent], _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
628
|
+
|
|
629
|
+
async def get_extraction_agent(
    self,
    extraction_agent_id: str,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
) -> ExtractAgent:
    """Fetch a single extraction agent by id.

    Args:
        extraction_agent_id: Identifier of the extraction agent.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.

    Returns:
        The requested ExtractAgent.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/",
        f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
    )
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "GET", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractAgent, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
672
|
+
|
|
673
|
+
async def update_extraction_agent(
    self,
    extraction_agent_id: str,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
    config: ExtractConfig,
) -> ExtractAgent:
    """Replace an extraction agent's schema and configuration.

    Args:
        extraction_agent_id: Identifier of the extraction agent.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.
        data_schema: The schema of the data.
        config: The configuration parameters for the extraction agent.

    Returns:
        The updated ExtractAgent.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/",
        f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
    )
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _body = jsonable_encoder({"data_schema": data_schema, "config": config})
    _res = await self._client_wrapper.httpx_client.request(
        "PUT", _url, params=_query, json=_body, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractAgent, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
728
|
+
|
|
729
|
+
async def delete_extraction_agent(
    self,
    extraction_agent_id: str,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
) -> typing.Any:
    """Delete an extraction agent by id.

    Args:
        extraction_agent_id: Identifier of the extraction agent.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.

    Returns:
        The API's JSON response body (shape not typed by this client).

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/",
        f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
    )
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "DELETE", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(typing.Any, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
772
|
+
|
|
773
|
+
async def list_jobs(
    self,
    *,
    extraction_agent_id: str,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
) -> typing.List[ExtractJob]:
    """List extraction jobs for a given agent.

    Args:
        extraction_agent_id: Identifier of the extraction agent whose jobs to list.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.

    Returns:
        A list of ExtractJob objects.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs")
    _query = remove_none_from_dict(
        {
            "extraction_agent_id": extraction_agent_id,
            "project_id": project_id,
            "organization_id": organization_id,
        }
    )
    _res = await self._client_wrapper.httpx_client.request(
        "GET", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(typing.List[ExtractJob], _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
819
|
+
|
|
820
|
+
async def run_job(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: ExtractJobCreate,
) -> ExtractJob:
    """Start a new extraction job.

    Args:
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.
        request: The ExtractJobCreate payload describing the job.

    Returns:
        The created ExtractJob.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs")
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "POST",
        _url,
        params=_query,
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractJob, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
868
|
+
|
|
869
|
+
async def get_job(
    self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
) -> ExtractJob:
    """Fetch an extraction job by id.

    Args:
        job_id: Identifier of the extraction job.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.

    Returns:
        The requested ExtractJob.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}")
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "GET", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractJob, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
905
|
+
|
|
906
|
+
async def run_job_with_parsed_file(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: ExtractJobCreate,
) -> typing.Optional[ExtractResultset]:
    """Run an extraction job against an already-parsed file.

    Args:
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.
        request: The ExtractJobCreate payload describing the job.

    Returns:
        The ExtractResultset, or None when the API returns a null body.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed")
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "POST",
        _url,
        params=_query,
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
954
|
+
|
|
955
|
+
async def run_jobs_in_batch(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    extraction_agent_id: str,
    file_ids: typing.List[str],
) -> typing.List[ExtractJob]:
    """Start one extraction job per file, in a single batch request.

    Args:
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.
        extraction_agent_id: The id of the extraction agent.
        file_ids: The ids of the files.

    Returns:
        The list of ExtractJob objects created by the batch.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch")
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _body = jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids})
    _res = await self._client_wrapper.httpx_client.request(
        "POST", _url, params=_query, json=_body, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(typing.List[ExtractJob], _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|
|
1000
|
+
|
|
1001
|
+
async def get_job_result(
    self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
) -> ExtractResultset:
    """Fetch the result set of a completed extraction job.

    Args:
        job_id: Identifier of the extraction job.
        project_id: Optional project scope for the request.
        organization_id: Optional organization scope for the request.

    Returns:
        The parsed ExtractResultset for the job.

    Raises:
        UnprocessableEntityError: If the API responds with HTTP 422.
        ApiError: For any other non-2xx response.
    """
    _url = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
    )
    _query = remove_none_from_dict({"project_id": project_id, "organization_id": organization_id})
    _res = await self._client_wrapper.httpx_client.request(
        "GET", _url, params=_query, headers=self._client_wrapper.get_headers(), timeout=60
    )
    if 200 <= _res.status_code < 300:
        return pydantic.parse_obj_as(ExtractResultset, _res.json())  # type: ignore
    if _res.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _res.json()))  # type: ignore
    try:
        _payload = _res.json()
    except JSONDecodeError:
        # Non-JSON error body: surface the raw text instead.
        raise ApiError(status_code=_res.status_code, body=_res.text)
    raise ApiError(status_code=_res.status_code, body=_payload)
|