llama-cloud 0.1.13__py3-none-any.whl → 0.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +36 -10
- llama_cloud/resources/__init__.py +0 -14
- llama_cloud/resources/llama_extract/__init__.py +0 -17
- llama_cloud/resources/llama_extract/client.py +105 -318
- llama_cloud/resources/organizations/client.py +15 -5
- llama_cloud/resources/parsing/client.py +24 -0
- llama_cloud/resources/pipelines/client.py +145 -10
- llama_cloud/resources/projects/client.py +25 -9
- llama_cloud/resources/reports/client.py +16 -6
- llama_cloud/types/__init__.py +42 -4
- llama_cloud/types/{plan.py → base_plan.py} +16 -13
- llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
- llama_cloud/types/base_plan_name.py +45 -0
- llama_cloud/types/base_plan_plan_frequency.py +25 -0
- llama_cloud/types/billing_period.py +32 -0
- llama_cloud/types/credit_type.py +32 -0
- llama_cloud/types/data_source.py +1 -0
- llama_cloud/types/extract_agent_create.py +39 -0
- llama_cloud/types/extract_agent_update.py +38 -0
- llama_cloud/types/extract_schema_validate_request.py +32 -0
- llama_cloud/types/free_credits_usage.py +34 -0
- llama_cloud/types/llama_parse_parameters.py +3 -0
- llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
- llama_cloud/types/pipeline_data_source.py +1 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/plan_limits.py +52 -0
- llama_cloud/types/recurring_credit_grant.py +44 -0
- llama_cloud/types/usage.py +5 -4
- llama_cloud/types/usage_active_alerts_item.py +25 -0
- llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/METADATA +3 -1
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/RECORD +40 -28
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/WHEEL +1 -1
- llama_cloud/resources/llama_extract/types/__init__.py +0 -17
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
- /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
- {llama_cloud-0.1.13.dist-info → llama_cloud-0.1.14.dist-info}/LICENSE +0 -0
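The bulk of this release is a rework of the LlamaExtract client: `create_extraction_agent`, `update_extraction_agent`, and `validate_extraction_schema` now take a single request object (`ExtractAgentCreate`, `ExtractAgentUpdate`, `ExtractSchemaValidateRequest`), the `run_job_with_parsed_file` and `run_job_with_parsed_file_test` methods are removed, and the extraction routes move from the `api/v1/extractionv2/` prefix to `api/v1/extraction/`. A minimal before/after sketch of a create call, adapted from the docstring examples in the diff below; the 0.1.13 keyword form and the `data_schema` field are reconstructed from the removed signature and the moved `extract_agent_create_data_schema` type, so treat them as an approximation rather than the exact old API:

```python
from llama_cloud import ExtractAgentCreate, ExtractConfig, ExtractMode, ExtractTarget
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.13 style (reconstructed from the removed signature: separate
# data_schema/config keyword arguments; shown for comparison only):
#
# agent = client.llama_extract.create_extraction_agent(
#     name="invoice-agent",
#     data_schema={"type": "object", "properties": {"total": {"type": "number"}}},
#     config=ExtractConfig(
#         extraction_target=ExtractTarget.PER_DOC,
#         extraction_mode=ExtractMode.FAST,
#     ),
# )

# 0.1.14 style: one ExtractAgentCreate request object.
agent = client.llama_extract.create_extraction_agent(
    request=ExtractAgentCreate(
        name="invoice-agent",
        # data_schema is assumed to be a field on ExtractAgentCreate
        # (see extract_agent_create_data_schema.py in the file list above).
        data_schema={"type": "object", "properties": {"total": {"type": "number"}}},
        config=ExtractConfig(
            extraction_target=ExtractTarget.PER_DOC,
            extraction_mode=ExtractMode.FAST,
        ),
    ),
)
```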
--- a/llama_cloud/resources/llama_extract/client.py
+++ b/llama_cloud/resources/llama_extract/client.py
@@ -10,17 +10,16 @@ from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.extract_agent import ExtractAgent
-from ...types.
+from ...types.extract_agent_create import ExtractAgentCreate
+from ...types.extract_agent_update import ExtractAgentUpdate
 from ...types.extract_job import ExtractJob
 from ...types.extract_job_create import ExtractJobCreate
 from ...types.extract_resultset import ExtractResultset
 from ...types.extract_run import ExtractRun
+from ...types.extract_schema_validate_request import ExtractSchemaValidateRequest
 from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.llama_extract_settings import LlamaExtractSettings
-from .types.extract_agent_create_data_schema import ExtractAgentCreateDataSchema
-from .types.extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
-from .types.extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema

 try:
     import pydantic
@@ -52,7 +51,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
             params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -72,9 +71,7 @@ class LlamaExtractClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-
-        data_schema: ExtractAgentCreateDataSchema,
-        config: ExtractConfig,
+        request: ExtractAgentCreate,
     ) -> ExtractAgent:
         """
         Parameters:
@@ -82,31 +79,34 @@ class LlamaExtractClient:

             - organization_id: typing.Optional[str].

-            -
-
-            - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
-
-            - config: ExtractConfig. The configuration parameters for the extraction agent.
+            - request: ExtractAgentCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractAgentCreate,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.llama_extract.create_extraction_agent(
-
-
-
-
+            request=ExtractAgentCreate(
+                name="string",
+                config=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
             ),
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -120,29 +120,30 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def validate_extraction_schema(
-        self, *, data_schema: ExtractSchemaValidateRequestDataSchema
-    ) -> ExtractSchemaValidateResponse:
+    def validate_extraction_schema(self, *, request: ExtractSchemaValidateRequest) -> ExtractSchemaValidateResponse:
         """
         Validates an extraction agent's schema definition.
         Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.

         Parameters:
-            -
+            - request: ExtractSchemaValidateRequest.
         ---
+        from llama_cloud import ExtractSchemaValidateRequest
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_extract.validate_extraction_schema(
+        client.llama_extract.validate_extraction_schema(
+            request=ExtractSchemaValidateRequest(),
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", "api/v1/
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/validation"
             ),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -175,7 +176,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
             ),
             params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
@@ -208,8 +209,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -224,18 +224,19 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def update_extraction_agent(
-        self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
-    ) -> ExtractAgent:
+    def update_extraction_agent(self, extraction_agent_id: str, *, request: ExtractAgentUpdate) -> ExtractAgent:
         """
         Parameters:
             - extraction_agent_id: str.

-            -
-
-            - config: ExtractConfig. The configuration parameters for the extraction agent.
+            - request: ExtractAgentUpdate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractAgentUpdate,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -243,19 +244,20 @@ class LlamaExtractClient:
         )
         client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
-
-
-
+            request=ExtractAgentUpdate(
+                config=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
             ),
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -286,8 +288,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -318,7 +319,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
             params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -362,7 +363,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -393,7 +394,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -407,111 +408,6 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job_with_parsed_file_test(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
-    ) -> ExtractResultset:
-        """
-        Parameters:
-            - job_create: ExtractJobCreate.
-
-            - extract_settings: typing.Optional[LlamaExtractSettings].
-        ---
-        from llama_cloud import (
-            ChunkMode,
-            ExtractConfig,
-            ExtractJobCreate,
-            ExtractMode,
-            ExtractTarget,
-            LlamaExtractSettings,
-            LlamaParseParameters,
-            ParsingMode,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.llama_extract.run_job_with_parsed_file_test(
-            job_create=ExtractJobCreate(
-                extraction_agent_id="string",
-                file_id="string",
-                config_override=ExtractConfig(
-                    extraction_target=ExtractTarget.PER_DOC,
-                    extraction_mode=ExtractMode.FAST,
-                ),
-            ),
-            extract_settings=LlamaExtractSettings(
-                chunk_mode=ChunkMode.PAGE,
-                llama_parse_params=LlamaParseParameters(
-                    parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
-                ),
-            ),
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
-        if extract_settings is not OMIT:
-            _request["extract_settings"] = extract_settings
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
-        """
-        Parameters:
-            - request: ExtractJobCreate.
-        ---
-        from llama_cloud import (
-            ExtractConfig,
-            ExtractJobCreate,
-            ExtractMode,
-            ExtractTarget,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.llama_extract.run_job_with_parsed_file(
-            request=ExtractJobCreate(
-                extraction_agent_id="string",
-                file_id="string",
-                config_override=ExtractConfig(
-                    extraction_target=ExtractTarget.PER_DOC,
-                    extraction_mode=ExtractMode.FAST,
-                ),
-            ),
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def run_job_test_user(
         self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
     ) -> ExtractJob:
@@ -558,7 +454,7 @@ class LlamaExtractClient:
             _request["extract_settings"] = extract_settings
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -589,9 +485,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -621,7 +515,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
             params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -652,9 +546,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -684,7 +576,7 @@ class LlamaExtractClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -717,7 +609,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
             params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -737,9 +629,7 @@ class AsyncLlamaExtractClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-
-        data_schema: ExtractAgentCreateDataSchema,
-        config: ExtractConfig,
+        request: ExtractAgentCreate,
     ) -> ExtractAgent:
         """
         Parameters:
@@ -747,31 +637,34 @@ class AsyncLlamaExtractClient:

             - organization_id: typing.Optional[str].

-            -
-
-            - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
-
-            - config: ExtractConfig. The configuration parameters for the extraction agent.
+            - request: ExtractAgentCreate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractAgentCreate,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.llama_extract.create_extraction_agent(
-
-
-
-
+            request=ExtractAgentCreate(
+                name="string",
+                config=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
             ),
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -786,28 +679,31 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     async def validate_extraction_schema(
-        self, *,
+        self, *, request: ExtractSchemaValidateRequest
     ) -> ExtractSchemaValidateResponse:
         """
         Validates an extraction agent's schema definition.
         Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.

         Parameters:
-            -
+            - request: ExtractSchemaValidateRequest.
         ---
+        from llama_cloud import ExtractSchemaValidateRequest
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_extract.validate_extraction_schema(
+        await client.llama_extract.validate_extraction_schema(
+            request=ExtractSchemaValidateRequest(),
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", "api/v1/
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/validation"
             ),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -840,7 +736,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
             ),
             params=remove_none_from_dict({"project_id": project_id}),
             headers=self._client_wrapper.get_headers(),
@@ -873,8 +769,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -889,18 +784,19 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def update_extraction_agent(
-        self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
-    ) -> ExtractAgent:
+    async def update_extraction_agent(self, extraction_agent_id: str, *, request: ExtractAgentUpdate) -> ExtractAgent:
         """
         Parameters:
             - extraction_agent_id: str.

-            -
-
-            - config: ExtractConfig. The configuration parameters for the extraction agent.
+            - request: ExtractAgentUpdate.
         ---
-        from llama_cloud import
+        from llama_cloud import (
+            ExtractAgentUpdate,
+            ExtractConfig,
+            ExtractMode,
+            ExtractTarget,
+        )
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -908,19 +804,20 @@ class AsyncLlamaExtractClient:
         )
         await client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
-
-
-
+            request=ExtractAgentUpdate(
+                config=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
             ),
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
-            json=jsonable_encoder(
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -951,8 +848,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -983,7 +879,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
             params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1027,7 +923,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1058,7 +954,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1072,111 +968,6 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job_with_parsed_file_test(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
-    ) -> ExtractResultset:
-        """
-        Parameters:
-            - job_create: ExtractJobCreate.
-
-            - extract_settings: typing.Optional[LlamaExtractSettings].
-        ---
-        from llama_cloud import (
-            ChunkMode,
-            ExtractConfig,
-            ExtractJobCreate,
-            ExtractMode,
-            ExtractTarget,
-            LlamaExtractSettings,
-            LlamaParseParameters,
-            ParsingMode,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.llama_extract.run_job_with_parsed_file_test(
-            job_create=ExtractJobCreate(
-                extraction_agent_id="string",
-                file_id="string",
-                config_override=ExtractConfig(
-                    extraction_target=ExtractTarget.PER_DOC,
-                    extraction_mode=ExtractMode.FAST,
-                ),
-            ),
-            extract_settings=LlamaExtractSettings(
-                chunk_mode=ChunkMode.PAGE,
-                llama_parse_params=LlamaParseParameters(
-                    parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
-                ),
-            ),
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
-        if extract_settings is not OMIT:
-            _request["extract_settings"] = extract_settings
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
-        """
-        Parameters:
-            - request: ExtractJobCreate.
-        ---
-        from llama_cloud import (
-            ExtractConfig,
-            ExtractJobCreate,
-            ExtractMode,
-            ExtractTarget,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.llama_extract.run_job_with_parsed_file(
-            request=ExtractJobCreate(
-                extraction_agent_id="string",
-                file_id="string",
-                config_override=ExtractConfig(
-                    extraction_target=ExtractTarget.PER_DOC,
-                    extraction_mode=ExtractMode.FAST,
-                ),
-            ),
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def run_job_test_user(
         self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
     ) -> ExtractJob:
@@ -1223,7 +1014,7 @@ class AsyncLlamaExtractClient:
             _request["extract_settings"] = extract_settings
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
            json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1254,9 +1045,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1286,7 +1075,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
             params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1317,9 +1106,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
-            ),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1349,7 +1136,7 @@ class AsyncLlamaExtractClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )