llama-cloud 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +34 -8
- llama_cloud/resources/__init__.py +14 -3
- llama_cloud/resources/chat_apps/client.py +99 -133
- llama_cloud/resources/llama_extract/__init__.py +16 -2
- llama_cloud/resources/llama_extract/client.py +328 -122
- llama_cloud/resources/llama_extract/types/__init__.py +14 -3
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
- llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
- llama_cloud/resources/llama_extract/types/{extract_agent_update_data_schema_value.py → extract_agent_update_data_schema_zero_value.py} +1 -1
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
- llama_cloud/resources/organizations/client.py +8 -12
- llama_cloud/resources/parsing/client.py +244 -0
- llama_cloud/resources/reports/client.py +30 -26
- llama_cloud/resources/retrievers/client.py +16 -4
- llama_cloud/types/__init__.py +22 -4
- llama_cloud/types/chat_app.py +11 -9
- llama_cloud/types/chat_app_response.py +12 -10
- llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
- llama_cloud/types/edit_suggestion.py +3 -4
- llama_cloud/types/edit_suggestion_blocks_item.py +8 -0
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/extract_job_create.py +4 -2
- llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
- llama_cloud/types/{extract_job_create_data_schema_override_value.py → extract_job_create_data_schema_override_zero_value.py} +1 -1
- llama_cloud/types/extract_mode.py +7 -7
- llama_cloud/types/extract_run.py +2 -2
- llama_cloud/types/extract_run_data.py +11 -0
- llama_cloud/types/extract_run_data_item_value.py +5 -0
- llama_cloud/types/extract_run_data_zero_value.py +5 -0
- llama_cloud/types/extract_schema_validate_response.py +32 -0
- llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
- llama_cloud/types/extract_target.py +17 -0
- llama_cloud/types/llama_extract_settings.py +8 -1
- llama_cloud/types/llama_parse_parameters.py +9 -0
- llama_cloud/types/plan.py +4 -0
- llama_cloud/types/preset_composite_retrieval_params.py +35 -0
- llama_cloud/types/report_file_info.py +37 -0
- llama_cloud/types/report_metadata.py +2 -1
- llama_cloud/types/supported_llm_model_names.py +28 -4
- {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/RECORD +45 -32
- llama_cloud/types/extract_run_data_value.py +0 -5
- {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/WHEEL +0 -0
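The largest change is in llama_cloud/resources/llama_extract/client.py: the extraction-agent client gains validate_extraction_schema and get_extraction_agent_by_name, list_extraction_agents loses its name filter, data_schema parameters move to dedicated types, and the ExtractConfig examples now set an extraction_target. Below is a minimal sketch of how that surface might be exercised, based only on the signatures and docstring examples in the diff that follows; the token, schema, and agent name are placeholders, and passing a plain dict for data_schema is assumed to remain supported.

from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token

# Hypothetical schema; per the new docstring, validate_extraction_schema returns the
# normalized schema if valid and raises on an invalid definition (HTTP 400).
schema = {"type": "object", "properties": {"title": {"type": "string"}}}
client.llama_extract.validate_extraction_schema(data_schema=schema)

# ExtractConfig examples in this release set an extraction_target as well as a mode.
agent = client.llama_extract.create_extraction_agent(
    name="my-agent",  # placeholder name
    data_schema=schema,  # plain dict assumed to satisfy ExtractAgentCreateDataSchema
    config=ExtractConfig(
        extraction_target=ExtractTarget.PER_DOC,
        extraction_mode=ExtractMode.FAST,
    ),
)

# Agents can now also be fetched by name rather than by id.
agent = client.llama_extract.get_extraction_agent_by_name(name="my-agent")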
llama_cloud/resources/llama_extract/client.py

@@ -15,10 +15,12 @@ from ...types.extract_job import ExtractJob
 from ...types.extract_job_create import ExtractJobCreate
 from ...types.extract_resultset import ExtractResultset
 from ...types.extract_run import ExtractRun
+from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.llama_extract_settings import LlamaExtractSettings
-from .types.
-from .types.
+from .types.extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+from .types.extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+from .types.extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema

 try:
     import pydantic
@@ -36,14 +38,10 @@ class LlamaExtractClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper

-    def list_extraction_agents(
-        self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
-    ) -> typing.List[ExtractAgent]:
+    def list_extraction_agents(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractAgent]:
         """
         Parameters:
         - project_id: typing.Optional[str].
-
-        - name: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -55,7 +53,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
-            params=remove_none_from_dict({"project_id": project_id
+            params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -75,7 +73,7 @@ class LlamaExtractClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
-        data_schema:
+        data_schema: ExtractAgentCreateDataSchema,
         config: ExtractConfig,
     ) -> ExtractAgent:
         """
@@ -86,11 +84,11 @@ class LlamaExtractClient:

         - name: str. The name of the extraction schema

-        - data_schema:
+        - data_schema: ExtractAgentCreateDataSchema. The schema of the data.

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import ExtractConfig, ExtractMode
+        from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -98,9 +96,9 @@ class LlamaExtractClient:
         )
         client.llama_extract.create_extraction_agent(
             name="string",
-            data_schema={},
             config=ExtractConfig(
-
+                extraction_target=ExtractTarget.PER_DOC,
+                extraction_mode=ExtractMode.FAST,
             ),
         )
         """
@@ -122,6 +120,77 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def validate_extraction_schema(
+        self, *, data_schema: ExtractSchemaValidateRequestDataSchema
+    ) -> ExtractSchemaValidateResponse:
+        """
+        Validates an extraction agent's schema definition.
+        Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.
+
+        Parameters:
+        - data_schema: ExtractSchemaValidateRequestDataSchema.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.validate_extraction_schema()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
+            ),
+            json=jsonable_encoder({"data_schema": data_schema}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractSchemaValidateResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
+        """
+        Parameters:
+        - name: str.
+
+        - project_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.get_extraction_agent_by_name(
+            name="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractAgent, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
         """
         Parameters:
@@ -156,21 +225,17 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     def update_extraction_agent(
-        self,
-        extraction_agent_id: str,
-        *,
-        data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
-        config: ExtractConfig,
+        self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
     ) -> ExtractAgent:
         """
         Parameters:
         - extraction_agent_id: str.

-        - data_schema:
+        - data_schema: ExtractAgentUpdateDataSchema. The schema of the data

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import ExtractConfig, ExtractMode
+        from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -178,9 +243,9 @@ class LlamaExtractClient:
         )
         client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
-            data_schema={},
             config=ExtractConfig(
-
+                extraction_target=ExtractTarget.PER_DOC,
+                extraction_mode=ExtractMode.FAST,
             ),
         )
         """
@@ -273,7 +338,12 @@ class LlamaExtractClient:
         Parameters:
         - request: ExtractJobCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreate,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -284,7 +354,8 @@ class LlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
         )
@@ -349,7 +420,9 @@ class LlamaExtractClient:
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
+            ExtractTarget,
             LlamaExtractSettings,
+            LlamaParseParameters,
         )
         from llama_cloud.client import LlamaCloud

@@ -361,10 +434,13 @@ class LlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
-            extract_settings=LlamaExtractSettings(
+            extract_settings=LlamaExtractSettings(
+                llama_parse_params=LlamaParseParameters(),
+            ),
         )
         """
         _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -392,7 +468,12 @@ class LlamaExtractClient:
         Parameters:
         - request: ExtractJobCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreate,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -403,7 +484,8 @@ class LlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
         )
@@ -438,7 +520,9 @@ class LlamaExtractClient:
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
+            ExtractTarget,
             LlamaExtractSettings,
+            LlamaParseParameters,
         )
         from llama_cloud.client import LlamaCloud

@@ -450,10 +534,13 @@ class LlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
-            extract_settings=LlamaExtractSettings(
+            extract_settings=LlamaExtractSettings(
+                llama_parse_params=LlamaParseParameters(),
+            ),
         )
         """
         _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -476,32 +563,61 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def
+    def get_job_result(self, job_id: str) -> ExtractResultset:
         """
         Parameters:
-        -
+        - job_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud

-
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.get_job_result(
+            job_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractResultset, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_extract_runs(self, *, extraction_agent_id: str) -> typing.List[ExtractRun]:
+        """
+        Parameters:
+        - extraction_agent_id: str.
         ---
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_extract.
+        client.llama_extract.list_extract_runs(
             extraction_agent_id="string",
-            file_ids=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
-            "
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/
-
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -510,7 +626,7 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def
+    def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
         - job_id: str.
@@ -520,20 +636,20 @@ class LlamaExtractClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_extract.
+        client.llama_extract.get_run_by_job_id(
             job_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractRun, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -542,39 +658,28 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def
-        self,
-        *,
-        extraction_agent_id: typing.Optional[str] = None,
-        run_id: typing.Optional[str] = None,
-        job_id: typing.Optional[str] = None,
-    ) -> typing.List[ExtractRun]:
+    def get_run(self, run_id: str) -> ExtractRun:
         """
         Parameters:
-        -
-
-        - run_id: typing.Optional[str].
-
-        - job_id: typing.Optional[str].
+        - run_id: str.
         ---
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_extract.
+        client.llama_extract.get_run(
+            run_id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
-            params=remove_none_from_dict(
-                {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractRun, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -588,14 +693,10 @@ class AsyncLlamaExtractClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list_extraction_agents(
-        self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
-    ) -> typing.List[ExtractAgent]:
+    async def list_extraction_agents(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractAgent]:
         """
         Parameters:
         - project_id: typing.Optional[str].
-
-        - name: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -607,7 +708,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
-            params=remove_none_from_dict({"project_id": project_id
+            params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -627,7 +728,7 @@ class AsyncLlamaExtractClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
-        data_schema:
+        data_schema: ExtractAgentCreateDataSchema,
         config: ExtractConfig,
     ) -> ExtractAgent:
         """
@@ -638,11 +739,11 @@ class AsyncLlamaExtractClient:

         - name: str. The name of the extraction schema

-        - data_schema:
+        - data_schema: ExtractAgentCreateDataSchema. The schema of the data.

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import ExtractConfig, ExtractMode
+        from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -650,9 +751,9 @@ class AsyncLlamaExtractClient:
         )
         await client.llama_extract.create_extraction_agent(
             name="string",
-            data_schema={},
             config=ExtractConfig(
-
+                extraction_target=ExtractTarget.PER_DOC,
+                extraction_mode=ExtractMode.FAST,
             ),
         )
         """
@@ -674,6 +775,77 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def validate_extraction_schema(
+        self, *, data_schema: ExtractSchemaValidateRequestDataSchema
+    ) -> ExtractSchemaValidateResponse:
+        """
+        Validates an extraction agent's schema definition.
+        Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.
+
+        Parameters:
+        - data_schema: ExtractSchemaValidateRequestDataSchema.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.validate_extraction_schema()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
+            ),
+            json=jsonable_encoder({"data_schema": data_schema}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractSchemaValidateResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
+        """
+        Parameters:
+        - name: str.
+
+        - project_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.get_extraction_agent_by_name(
+            name="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractAgent, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
         """
         Parameters:
@@ -708,21 +880,17 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     async def update_extraction_agent(
-        self,
-        extraction_agent_id: str,
-        *,
-        data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
-        config: ExtractConfig,
+        self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
     ) -> ExtractAgent:
         """
         Parameters:
         - extraction_agent_id: str.

-        - data_schema:
+        - data_schema: ExtractAgentUpdateDataSchema. The schema of the data

         - config: ExtractConfig. The configuration parameters for the extraction agent.
         ---
-        from llama_cloud import ExtractConfig, ExtractMode
+        from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -730,9 +898,9 @@ class AsyncLlamaExtractClient:
         )
         await client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
-            data_schema={},
             config=ExtractConfig(
-
+                extraction_target=ExtractTarget.PER_DOC,
+                extraction_mode=ExtractMode.FAST,
             ),
         )
         """
@@ -825,7 +993,12 @@ class AsyncLlamaExtractClient:
         Parameters:
         - request: ExtractJobCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreate,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -836,7 +1009,8 @@ class AsyncLlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
         )
@@ -901,7 +1075,9 @@ class AsyncLlamaExtractClient:
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
+            ExtractTarget,
             LlamaExtractSettings,
+            LlamaParseParameters,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -913,10 +1089,13 @@ class AsyncLlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
-            extract_settings=LlamaExtractSettings(
+            extract_settings=LlamaExtractSettings(
+                llama_parse_params=LlamaParseParameters(),
+            ),
         )
         """
         _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -944,7 +1123,12 @@ class AsyncLlamaExtractClient:
         Parameters:
         - request: ExtractJobCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreate,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -955,7 +1139,8 @@ class AsyncLlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
         )
@@ -990,7 +1175,9 @@ class AsyncLlamaExtractClient:
             ExtractConfig,
             ExtractJobCreate,
             ExtractMode,
+            ExtractTarget,
             LlamaExtractSettings,
+            LlamaParseParameters,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1002,10 +1189,13 @@ class AsyncLlamaExtractClient:
                 extraction_agent_id="string",
                 file_id="string",
                 config_override=ExtractConfig(
-
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
                 ),
             ),
-            extract_settings=LlamaExtractSettings(
+            extract_settings=LlamaExtractSettings(
+                llama_parse_params=LlamaParseParameters(),
+            ),
         )
         """
         _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -1028,34 +1218,61 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def
-        self, *, extraction_agent_id: str, file_ids: typing.List[str]
-    ) -> typing.List[ExtractJob]:
+    async def get_job_result(self, job_id: str) -> ExtractResultset:
         """
         Parameters:
-        -
+        - job_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud

-
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.get_job_result(
+            job_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractResultset, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_extract_runs(self, *, extraction_agent_id: str) -> typing.List[ExtractRun]:
+        """
+        Parameters:
+        - extraction_agent_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_extract.
+        await client.llama_extract.list_extract_runs(
             extraction_agent_id="string",
-            file_ids=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
-            "
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/
-
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1064,7 +1281,7 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def
+    async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
         - job_id: str.
@@ -1074,20 +1291,20 @@ class AsyncLlamaExtractClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_extract.
+        await client.llama_extract.get_run_by_job_id(
             job_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractRun, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1096,39 +1313,28 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def
-        self,
-        *,
-        extraction_agent_id: typing.Optional[str] = None,
-        run_id: typing.Optional[str] = None,
-        job_id: typing.Optional[str] = None,
-    ) -> typing.List[ExtractRun]:
+    async def get_run(self, run_id: str) -> ExtractRun:
         """
         Parameters:
-        -
-
-        - run_id: typing.Optional[str].
-
-        - job_id: typing.Optional[str].
+        - run_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_extract.
+        await client.llama_extract.get_run(
+            run_id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
-            params=remove_none_from_dict(
-                {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractRun, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
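The run and result endpoints follow the same pattern on the async client: the old filterable run-listing call is replaced by get_job_result, list_extract_runs, get_run_by_job_id, and get_run. A minimal sketch under the same caveats as above (the token and ids are placeholders, and the id attribute on ExtractRun is assumed from the generated model rather than shown in this diff):

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")  # placeholder token
    job_id = "..."    # placeholder: id of a previously queued extraction job
    agent_id = "..."  # placeholder: id of an existing extraction agent

    # Per-job accessors: fetch the result set and the run record for a given job.
    result = await client.llama_extract.get_job_result(job_id)
    run = await client.llama_extract.get_run_by_job_id(job_id)

    # Runs are now listed per agent and fetched individually by run id.
    runs = await client.llama_extract.list_extract_runs(extraction_agent_id=agent_id)
    if runs:
        # Assumes ExtractRun exposes an id field on the generated pydantic model.
        run = await client.llama_extract.get_run(runs[0].id)


asyncio.run(main())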