llama-cloud 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Files changed (46)
  1. llama_cloud/__init__.py +38 -12
  2. llama_cloud/resources/__init__.py +0 -14
  3. llama_cloud/resources/llama_extract/__init__.py +0 -17
  4. llama_cloud/resources/llama_extract/client.py +113 -314
  5. llama_cloud/resources/organizations/client.py +15 -5
  6. llama_cloud/resources/parsing/client.py +153 -86
  7. llama_cloud/resources/pipelines/client.py +145 -10
  8. llama_cloud/resources/projects/client.py +25 -9
  9. llama_cloud/resources/reports/client.py +16 -6
  10. llama_cloud/types/__init__.py +44 -6
  11. llama_cloud/types/{plan.py → base_plan.py} +16 -13
  12. llama_cloud/types/base_plan_metronome_plan_type.py +17 -0
  13. llama_cloud/types/base_plan_name.py +45 -0
  14. llama_cloud/types/base_plan_plan_frequency.py +25 -0
  15. llama_cloud/types/billing_period.py +32 -0
  16. llama_cloud/types/{base.py → credit_type.py} +4 -1
  17. llama_cloud/types/data_source.py +1 -0
  18. llama_cloud/types/eval_dataset_job_record.py +1 -2
  19. llama_cloud/types/extract_agent_create.py +39 -0
  20. llama_cloud/types/extract_agent_update.py +38 -0
  21. llama_cloud/types/extract_schema_validate_request.py +32 -0
  22. llama_cloud/types/free_credits_usage.py +34 -0
  23. llama_cloud/types/job_record.py +2 -3
  24. llama_cloud/types/llama_parse_parameters.py +9 -0
  25. llama_cloud/types/llm_parameters.py +1 -0
  26. llama_cloud/types/page_screenshot_metadata.py +1 -0
  27. llama_cloud/types/paginated_list_cloud_documents_response.py +35 -0
  28. llama_cloud/types/parsing_mode.py +37 -0
  29. llama_cloud/types/pipeline_data_source.py +1 -0
  30. llama_cloud/types/pipeline_file.py +1 -0
  31. llama_cloud/types/plan_limits.py +52 -0
  32. llama_cloud/types/recurring_credit_grant.py +44 -0
  33. llama_cloud/types/usage.py +5 -4
  34. llama_cloud/types/usage_active_alerts_item.py +25 -0
  35. llama_cloud/types/{interval_usage_and_plan.py → usage_and_plan.py} +4 -6
  36. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/METADATA +2 -1
  37. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/RECORD +45 -33
  38. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/WHEEL +1 -1
  39. llama_cloud/resources/llama_extract/types/__init__.py +0 -17
  40. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema.py +0 -0
  41. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_create_data_schema_zero_value.py +0 -0
  42. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema.py +0 -0
  43. /llama_cloud/{resources/llama_extract/types → types}/extract_agent_update_data_schema_zero_value.py +0 -0
  44. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema.py +0 -0
  45. /llama_cloud/{resources/llama_extract/types → types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  46. {llama_cloud-0.1.12.dist-info → llama_cloud-0.1.14.dist-info}/LICENSE +0 -0
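
The diff below covers llama_cloud/resources/llama_extract/client.py. Two changes dominate it: every LlamaExtract endpoint moves from the api/v1/extractionv2/ prefix to api/v1/extraction/, and the agent CRUD methods now take a single request model (ExtractAgentCreate, ExtractAgentUpdate, ExtractSchemaValidateRequest) instead of flat keyword arguments. The sketch below contrasts the two calling conventions for create_extraction_agent; the 0.1.12 shape is taken from the docstrings removed in this diff, and all field values are placeholders.

    # Calling-convention change for create_extraction_agent (placeholder values).
    from llama_cloud import ExtractAgentCreate, ExtractConfig, ExtractMode, ExtractTarget
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # llama-cloud 0.1.12: flat keyword arguments (removed in 0.1.14).
    # client.llama_extract.create_extraction_agent(
    #     name="my-agent",
    #     data_schema={"type": "object", "properties": {}},
    #     config=ExtractConfig(extraction_target=ExtractTarget.PER_DOC, extraction_mode=ExtractMode.FAST),
    # )

    # llama-cloud 0.1.14: a single ExtractAgentCreate request object.
    agent = client.llama_extract.create_extraction_agent(
        request=ExtractAgentCreate(
            name="my-agent",
            config=ExtractConfig(
                extraction_target=ExtractTarget.PER_DOC,
                extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
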
@@ -10,17 +10,16 @@ from ...core.jsonable_encoder import jsonable_encoder
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
  from ...types.extract_agent import ExtractAgent
- from ...types.extract_config import ExtractConfig
+ from ...types.extract_agent_create import ExtractAgentCreate
+ from ...types.extract_agent_update import ExtractAgentUpdate
  from ...types.extract_job import ExtractJob
  from ...types.extract_job_create import ExtractJobCreate
  from ...types.extract_resultset import ExtractResultset
  from ...types.extract_run import ExtractRun
+ from ...types.extract_schema_validate_request import ExtractSchemaValidateRequest
  from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
  from ...types.http_validation_error import HttpValidationError
  from ...types.llama_extract_settings import LlamaExtractSettings
- from .types.extract_agent_create_data_schema import ExtractAgentCreateDataSchema
- from .types.extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
- from .types.extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema

  try:
  import pydantic
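
The import hunk above drops the per-resource data-schema aliases (ExtractAgentCreateDataSchema and friends, now relocated to llama_cloud/types per the file list) in favor of the three new request models. The sketch below builds a create request with an inline schema; note that data_schema as a field of ExtractAgentCreate is an assumption inferred from the removed keyword argument and the relocated *DataSchema types, since the new docstring in this diff only demonstrates name and config.

    # Assumed sketch: ExtractAgentCreate carrying an inline data schema.
    from llama_cloud import ExtractAgentCreate, ExtractConfig, ExtractMode, ExtractTarget

    invoice_schema = {
        "type": "object",
        "properties": {
            "vendor": {"type": "string"},
            "total": {"type": "number"},
        },
    }

    create_request = ExtractAgentCreate(
        name="invoice-agent",        # placeholder name
        data_schema=invoice_schema,  # assumed field, see note above
        config=ExtractConfig(
            extraction_target=ExtractTarget.PER_DOC,
            extraction_mode=ExtractMode.FAST,
        ),
    )
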
@@ -52,7 +51,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
  params=remove_none_from_dict({"project_id": project_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -72,9 +71,7 @@ class LlamaExtractClient:
  *,
  project_id: typing.Optional[str] = None,
  organization_id: typing.Optional[str] = None,
- name: str,
- data_schema: ExtractAgentCreateDataSchema,
- config: ExtractConfig,
+ request: ExtractAgentCreate,
  ) -> ExtractAgent:
  """
  Parameters:
@@ -82,31 +79,34 @@ class LlamaExtractClient:

  - organization_id: typing.Optional[str].

- - name: str. The name of the extraction schema
-
- - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
+ - request: ExtractAgentCreate.
  ---
- from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
+ from llama_cloud import (
+ ExtractAgentCreate,
+ ExtractConfig,
+ ExtractMode,
+ ExtractTarget,
+ )
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
  client.llama_extract.create_extraction_agent(
- name="string",
- config=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
+ request=ExtractAgentCreate(
+ name="string",
+ config=ExtractConfig(
+ extraction_target=ExtractTarget.PER_DOC,
+ extraction_mode=ExtractMode.FAST,
+ ),
  ),
  )
  """
  _response = self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -120,29 +120,30 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def validate_extraction_schema(
- self, *, data_schema: ExtractSchemaValidateRequestDataSchema
- ) -> ExtractSchemaValidateResponse:
+ def validate_extraction_schema(self, *, request: ExtractSchemaValidateRequest) -> ExtractSchemaValidateResponse:
  """
  Validates an extraction agent's schema definition.
  Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.

  Parameters:
- - data_schema: ExtractSchemaValidateRequestDataSchema.
+ - request: ExtractSchemaValidateRequest.
  ---
+ from llama_cloud import ExtractSchemaValidateRequest
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.llama_extract.validate_extraction_schema()
+ client.llama_extract.validate_extraction_schema(
+ request=ExtractSchemaValidateRequest(),
+ )
  """
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
+ f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/validation"
  ),
- json=jsonable_encoder({"data_schema": data_schema}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
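
validate_extraction_schema now wraps its payload in ExtractSchemaValidateRequest and posts it to the renamed .../extraction-agents/schema/validation route. A minimal sketch follows; keeping data_schema as a field of the request model is an assumption carried over from the 0.1.12 keyword argument of the same name.

    # Assumed sketch: schema validation with the new request wrapper.
    from llama_cloud import ExtractSchemaValidateRequest
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    result = client.llama_extract.validate_extraction_schema(
        request=ExtractSchemaValidateRequest(
            data_schema={"type": "object", "properties": {"title": {"type": "string"}}},  # assumed field
        ),
    )
    # Per the docstring, a valid schema comes back normalized; an invalid one raises an HTTP 400.
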
@@ -175,7 +176,7 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
  ),
  params=remove_none_from_dict({"project_id": project_id}),
  headers=self._client_wrapper.get_headers(),
@@ -208,8 +209,7 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -224,18 +224,19 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def update_extraction_agent(
- self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
- ) -> ExtractAgent:
+ def update_extraction_agent(self, extraction_agent_id: str, *, request: ExtractAgentUpdate) -> ExtractAgent:
  """
  Parameters:
  - extraction_agent_id: str.

- - data_schema: ExtractAgentUpdateDataSchema. The schema of the data
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
+ - request: ExtractAgentUpdate.
  ---
- from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
+ from llama_cloud import (
+ ExtractAgentUpdate,
+ ExtractConfig,
+ ExtractMode,
+ ExtractTarget,
+ )
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
@@ -243,19 +244,20 @@ class LlamaExtractClient:
  )
  client.llama_extract.update_extraction_agent(
  extraction_agent_id="string",
- config=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
+ request=ExtractAgentUpdate(
+ config=ExtractConfig(
+ extraction_target=ExtractTarget.PER_DOC,
+ extraction_mode=ExtractMode.FAST,
+ ),
  ),
  )
  """
  _response = self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
- json=jsonable_encoder({"data_schema": data_schema, "config": config}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
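
update_extraction_agent follows the same pattern as create: the separate data_schema and config keyword arguments collapse into one ExtractAgentUpdate object. A migration sketch with placeholder values, based on the old and new docstrings in the hunks above:

    # update_extraction_agent: 0.1.12 vs 0.1.14 calling convention (placeholder values).
    from llama_cloud import ExtractAgentUpdate, ExtractConfig, ExtractMode, ExtractTarget
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    new_config = ExtractConfig(
        extraction_target=ExtractTarget.PER_DOC,
        extraction_mode=ExtractMode.FAST,
    )

    # 0.1.12 (removed): separate data_schema and config keyword arguments.
    # client.llama_extract.update_extraction_agent(
    #     extraction_agent_id="agent-id",
    #     data_schema={"type": "object", "properties": {}},
    #     config=new_config,
    # )

    # 0.1.14: one ExtractAgentUpdate request object.
    agent = client.llama_extract.update_extraction_agent(
        extraction_agent_id="agent-id",
        request=ExtractAgentUpdate(config=new_config),
    )
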
@@ -286,8 +288,7 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "DELETE",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -318,7 +319,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
  params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -362,7 +363,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -393,7 +394,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -407,108 +408,6 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def run_job_with_parsed_file_test(
- self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
- ) -> ExtractResultset:
- """
- Parameters:
- - job_create: ExtractJobCreate.
-
- - extract_settings: typing.Optional[LlamaExtractSettings].
- ---
- from llama_cloud import (
- ChunkMode,
- ExtractConfig,
- ExtractJobCreate,
- ExtractMode,
- ExtractTarget,
- LlamaExtractSettings,
- LlamaParseParameters,
- )
- from llama_cloud.client import LlamaCloud
-
- client = LlamaCloud(
- token="YOUR_TOKEN",
- )
- client.llama_extract.run_job_with_parsed_file_test(
- job_create=ExtractJobCreate(
- extraction_agent_id="string",
- file_id="string",
- config_override=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
- ),
- ),
- extract_settings=LlamaExtractSettings(
- chunk_mode=ChunkMode.PAGE,
- llama_parse_params=LlamaParseParameters(),
- ),
- )
- """
- _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
- if extract_settings is not OMIT:
- _request["extract_settings"] = extract_settings
- _response = self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
- json=jsonable_encoder(_request),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
- def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
- """
- Parameters:
- - request: ExtractJobCreate.
- ---
- from llama_cloud import (
- ExtractConfig,
- ExtractJobCreate,
- ExtractMode,
- ExtractTarget,
- )
- from llama_cloud.client import LlamaCloud
-
- client = LlamaCloud(
- token="YOUR_TOKEN",
- )
- client.llama_extract.run_job_with_parsed_file(
- request=ExtractJobCreate(
- extraction_agent_id="string",
- file_id="string",
- config_override=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
- ),
- ),
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
- json=jsonable_encoder(request),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  def run_job_test_user(
  self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
  ) -> ExtractJob:
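
The hunk above deletes run_job_with_parsed_file_test and run_job_with_parsed_file from the sync client (their async twins are deleted further down), along with the /jobs/parsed routes they called. Code that used those helpers needs another entry point; the sketch below assumes the surviving job-based flow. The method names run_job, get_job, and get_job_result, and the job fields polled here, are assumptions: this diff shows the underlying routes (POST api/v1/extraction/jobs, GET .../jobs/{job_id}, GET .../jobs/{job_id}/result) but not the wrapping method names.

    # Assumed replacement for the removed run_job_with_parsed_file helpers.
    import time

    from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode, ExtractTarget
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    job = client.llama_extract.run_job(  # assumed method name
        request=ExtractJobCreate(
            extraction_agent_id="agent-id",  # placeholder
            file_id="file-id",               # placeholder
            config_override=ExtractConfig(
                extraction_target=ExtractTarget.PER_DOC,
                extraction_mode=ExtractMode.FAST,
            ),
        ),
    )

    # Poll until the job settles, then fetch the result set (assumed fields and names).
    while client.llama_extract.get_job(job.id).status not in ("SUCCESS", "ERROR"):
        time.sleep(2)
    result = client.llama_extract.get_job_result(job.id)
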
@@ -526,6 +425,7 @@ class LlamaExtractClient:
  ExtractTarget,
  LlamaExtractSettings,
  LlamaParseParameters,
+ ParsingMode,
  )
  from llama_cloud.client import LlamaCloud

@@ -543,7 +443,9 @@ class LlamaExtractClient:
  ),
  extract_settings=LlamaExtractSettings(
  chunk_mode=ChunkMode.PAGE,
- llama_parse_params=LlamaParseParameters(),
+ llama_parse_params=LlamaParseParameters(
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+ ),
  ),
  )
  """
@@ -552,7 +454,7 @@ class LlamaExtractClient:
  _request["extract_settings"] = extract_settings
  _response = self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -583,9 +485,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
- ),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -615,7 +515,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
  params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -646,9 +546,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
- ),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -678,7 +576,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -711,7 +609,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
  params=remove_none_from_dict({"project_id": project_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -731,9 +629,7 @@ class AsyncLlamaExtractClient:
  *,
  project_id: typing.Optional[str] = None,
  organization_id: typing.Optional[str] = None,
- name: str,
- data_schema: ExtractAgentCreateDataSchema,
- config: ExtractConfig,
+ request: ExtractAgentCreate,
  ) -> ExtractAgent:
  """
  Parameters:
@@ -741,31 +637,34 @@ class AsyncLlamaExtractClient:

  - organization_id: typing.Optional[str].

- - name: str. The name of the extraction schema
-
- - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
+ - request: ExtractAgentCreate.
  ---
- from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
+ from llama_cloud import (
+ ExtractAgentCreate,
+ ExtractConfig,
+ ExtractMode,
+ ExtractTarget,
+ )
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
  await client.llama_extract.create_extraction_agent(
- name="string",
- config=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
+ request=ExtractAgentCreate(
+ name="string",
+ config=ExtractConfig(
+ extraction_target=ExtractTarget.PER_DOC,
+ extraction_mode=ExtractMode.FAST,
+ ),
  ),
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -780,28 +679,31 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)

  async def validate_extraction_schema(
- self, *, data_schema: ExtractSchemaValidateRequestDataSchema
+ self, *, request: ExtractSchemaValidateRequest
  ) -> ExtractSchemaValidateResponse:
  """
  Validates an extraction agent's schema definition.
  Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.

  Parameters:
- - data_schema: ExtractSchemaValidateRequestDataSchema.
+ - request: ExtractSchemaValidateRequest.
  ---
+ from llama_cloud import ExtractSchemaValidateRequest
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.llama_extract.validate_extraction_schema()
+ await client.llama_extract.validate_extraction_schema(
+ request=ExtractSchemaValidateRequest(),
+ )
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
+ f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/validation"
  ),
- json=jsonable_encoder({"data_schema": data_schema}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -834,7 +736,7 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
  ),
  params=remove_none_from_dict({"project_id": project_id}),
  headers=self._client_wrapper.get_headers(),
@@ -867,8 +769,7 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -883,18 +784,19 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def update_extraction_agent(
- self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
- ) -> ExtractAgent:
+ async def update_extraction_agent(self, extraction_agent_id: str, *, request: ExtractAgentUpdate) -> ExtractAgent:
  """
  Parameters:
  - extraction_agent_id: str.

- - data_schema: ExtractAgentUpdateDataSchema. The schema of the data
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
+ - request: ExtractAgentUpdate.
  ---
- from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
+ from llama_cloud import (
+ ExtractAgentUpdate,
+ ExtractConfig,
+ ExtractMode,
+ ExtractTarget,
+ )
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
@@ -902,19 +804,20 @@ class AsyncLlamaExtractClient:
  )
  await client.llama_extract.update_extraction_agent(
  extraction_agent_id="string",
- config=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
+ request=ExtractAgentUpdate(
+ config=ExtractConfig(
+ extraction_target=ExtractTarget.PER_DOC,
+ extraction_mode=ExtractMode.FAST,
+ ),
  ),
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
- json=jsonable_encoder({"data_schema": data_schema, "config": config}),
+ json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
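
The AsyncLlamaExtractClient mirrors every change above: the same request objects are passed to awaited methods against the renamed routes. A minimal async sketch of the new update call, following the AsyncLlamaCloud docstring in this hunk, with placeholder values:

    # Async variant of the new update_extraction_agent call.
    import asyncio

    from llama_cloud import ExtractAgentUpdate, ExtractConfig, ExtractMode, ExtractTarget
    from llama_cloud.client import AsyncLlamaCloud


    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        await client.llama_extract.update_extraction_agent(
            extraction_agent_id="agent-id",  # placeholder
            request=ExtractAgentUpdate(
                config=ExtractConfig(
                    extraction_target=ExtractTarget.PER_DOC,
                    extraction_mode=ExtractMode.FAST,
                ),
            ),
        )


    asyncio.run(main())
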
@@ -945,8 +848,7 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "DELETE",
  urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/{extraction_agent_id}"
  ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -977,7 +879,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
  params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1021,7 +923,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1052,7 +954,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1066,108 +968,6 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def run_job_with_parsed_file_test(
- self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
- ) -> ExtractResultset:
- """
- Parameters:
- - job_create: ExtractJobCreate.
-
- - extract_settings: typing.Optional[LlamaExtractSettings].
- ---
- from llama_cloud import (
- ChunkMode,
- ExtractConfig,
- ExtractJobCreate,
- ExtractMode,
- ExtractTarget,
- LlamaExtractSettings,
- LlamaParseParameters,
- )
- from llama_cloud.client import AsyncLlamaCloud
-
- client = AsyncLlamaCloud(
- token="YOUR_TOKEN",
- )
- await client.llama_extract.run_job_with_parsed_file_test(
- job_create=ExtractJobCreate(
- extraction_agent_id="string",
- file_id="string",
- config_override=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
- ),
- ),
- extract_settings=LlamaExtractSettings(
- chunk_mode=ChunkMode.PAGE,
- llama_parse_params=LlamaParseParameters(),
- ),
- )
- """
- _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
- if extract_settings is not OMIT:
- _request["extract_settings"] = extract_settings
- _response = await self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
- json=jsonable_encoder(_request),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
- async def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
- """
- Parameters:
- - request: ExtractJobCreate.
- ---
- from llama_cloud import (
- ExtractConfig,
- ExtractJobCreate,
- ExtractMode,
- ExtractTarget,
- )
- from llama_cloud.client import AsyncLlamaCloud
-
- client = AsyncLlamaCloud(
- token="YOUR_TOKEN",
- )
- await client.llama_extract.run_job_with_parsed_file(
- request=ExtractJobCreate(
- extraction_agent_id="string",
- file_id="string",
- config_override=ExtractConfig(
- extraction_target=ExtractTarget.PER_DOC,
- extraction_mode=ExtractMode.FAST,
- ),
- ),
- )
- """
- _response = await self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
- json=jsonable_encoder(request),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  async def run_job_test_user(
  self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
  ) -> ExtractJob:
@@ -1185,6 +985,7 @@ class AsyncLlamaExtractClient:
  ExtractTarget,
  LlamaExtractSettings,
  LlamaParseParameters,
+ ParsingMode,
  )
  from llama_cloud.client import AsyncLlamaCloud

@@ -1202,7 +1003,9 @@ class AsyncLlamaExtractClient:
  ),
  extract_settings=LlamaExtractSettings(
  chunk_mode=ChunkMode.PAGE,
- llama_parse_params=LlamaParseParameters(),
+ llama_parse_params=LlamaParseParameters(
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+ ),
  ),
  )
  """
@@ -1211,7 +1014,7 @@ class AsyncLlamaExtractClient:
  _request["extract_settings"] = extract_settings
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1242,9 +1045,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
- ),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1274,7 +1075,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
  params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1305,9 +1106,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
- ),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1337,7 +1136,7 @@ class AsyncLlamaExtractClient:
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )