llama-cloud 0.1.7a1__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff compares the contents of two publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

This version of llama-cloud has been flagged as a potentially problematic release.

Files changed (31)
  1. llama_cloud/__init__.py +16 -16
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -5
  4. llama_cloud/resources/chat_apps/client.py +10 -0
  5. llama_cloud/resources/files/client.py +305 -6
  6. llama_cloud/resources/llama_extract/client.py +376 -276
  7. llama_cloud/resources/parsing/client.py +106 -18
  8. llama_cloud/resources/reports/client.py +4 -4
  9. llama_cloud/types/__init__.py +16 -10
  10. llama_cloud/types/composite_retrieval_result.py +2 -2
  11. llama_cloud/types/{extraction_job.py → composite_retrieved_text_node_with_score.py} +5 -6
  12. llama_cloud/types/extract_job.py +3 -0
  13. llama_cloud/types/extract_resultset.py +2 -6
  14. llama_cloud/types/extract_run.py +54 -0
  15. llama_cloud/types/{extraction_result_data_value.py → extract_run_data_schema_value.py} +1 -1
  16. llama_cloud/types/extract_run_data_value.py +5 -0
  17. llama_cloud/types/{extraction_schema_data_schema_value.py → extract_run_extraction_metadata_value.py} +1 -1
  18. llama_cloud/types/extract_state.py +29 -0
  19. llama_cloud/types/{extraction_result.py → llama_extract_settings.py} +12 -11
  20. llama_cloud/types/llama_parse_parameters.py +6 -0
  21. llama_cloud/types/{extraction_schema.py → page_figure_metadata.py} +7 -12
  22. llama_cloud/types/report_metadata.py +4 -0
  23. {llama_cloud-0.1.7a1.dist-info → llama_cloud-0.1.9.dist-info}/METADATA +2 -1
  24. {llama_cloud-0.1.7a1.dist-info → llama_cloud-0.1.9.dist-info}/RECORD +26 -28
  25. {llama_cloud-0.1.7a1.dist-info → llama_cloud-0.1.9.dist-info}/WHEEL +1 -1
  26. llama_cloud/resources/extraction/__init__.py +0 -5
  27. llama_cloud/resources/extraction/client.py +0 -756
  28. llama_cloud/resources/extraction/types/__init__.py +0 -6
  29. llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py +0 -7
  30. llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +0 -7
  31. {llama_cloud-0.1.7a1.dist-info → llama_cloud-0.1.9.dist-info}/LICENSE +0 -0
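
Most of the churn below is the rewrite of llama_cloud/resources/llama_extract/client.py: the old llama_cloud/resources/extraction module is removed, the extraction_agent/extraction_agents endpoints are consolidated under api/v1/extractionv2/extraction-agents, the project_id/organization_id parameters disappear from most methods, and new ExtractRun listing and test-run methods (run_job_with_parsed_file_test, run_job_test_user, list_extract_runs) are added. The following is a rough usage sketch of the 0.1.9 client surface assembled from the docstring examples in the diff; the token and the quoted IDs are placeholders, not values from the release.

from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",  # placeholder API token
)

# Agents are now listed and fetched without the old project_id/organization_id
# query parameters; the endpoint is api/v1/extractionv2/extraction-agents.
agents = client.llama_extract.list_extraction_agents()
agent = client.llama_extract.get_extraction_agent("EXTRACTION_AGENT_ID")  # placeholder id

# Jobs are created from an ExtractJobCreate payload; results are fetched by job id.
job = client.llama_extract.run_job(
    request=ExtractJobCreate(
        extraction_agent_id="EXTRACTION_AGENT_ID",
        file_id="FILE_ID",  # placeholder id
        config_override=ExtractConfig(extraction_mode=ExtractMode.PER_DOC),
    ),
)
result = client.llama_extract.get_job_result("JOB_ID")  # placeholder id

# New in 0.1.9: extraction runs can be listed, optionally filtered by agent, run, or job.
runs = client.llama_extract.list_extract_runs(extraction_agent_id="EXTRACTION_AGENT_ID")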
@@ -14,7 +14,9 @@ from ...types.extract_config import ExtractConfig
  from ...types.extract_job import ExtractJob
  from ...types.extract_job_create import ExtractJobCreate
  from ...types.extract_resultset import ExtractResultset
+ from ...types.extract_run import ExtractRun
  from ...types.http_validation_error import HttpValidationError
+ from ...types.llama_extract_settings import LlamaExtractSettings
  from .types.extract_agent_create_data_schema_value import ExtractAgentCreateDataSchemaValue
  from .types.extract_agent_update_data_schema_value import ExtractAgentUpdateDataSchemaValue

@@ -34,6 +36,39 @@ class LlamaExtractClient:
  def __init__(self, *, client_wrapper: SyncClientWrapper):
  self._client_wrapper = client_wrapper

+ def list_extraction_agents(
+ self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
+ ) -> typing.List[ExtractAgent]:
+ """
+ Parameters:
+ - project_id: typing.Optional[str].
+
+ - name: typing.Optional[str].
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.llama_extract.list_extraction_agents()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ params=remove_none_from_dict({"project_id": project_id, "name": name}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def create_extraction_agent(
  self,
  *,
@@ -71,7 +106,7 @@ class LlamaExtractClient:
  """
  _response = self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
@@ -87,55 +122,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def list_extraction_agents(
- self, *, project_id: str, organization_id: typing.Optional[str] = None
- ) -> typing.List[ExtractAgent]:
- """
- Parameters:
- - project_id: str.
-
- - organization_id: typing.Optional[str].
- ---
- from llama_cloud.client import LlamaCloud
-
- client = LlamaCloud(
- token="YOUR_TOKEN",
- )
- client.llama_extract.list_extraction_agents(
- project_id="string",
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
- def get_extraction_agent(
- self,
- extraction_agent_id: str,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> ExtractAgent:
+ def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud

@@ -150,9 +140,8 @@ class LlamaExtractClient:
  "GET",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -170,8 +159,6 @@ class LlamaExtractClient:
  self,
  extraction_agent_id: str,
  *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
  data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
  config: ExtractConfig,
  ) -> ExtractAgent:
@@ -179,10 +166,6 @@ class LlamaExtractClient:
  Parameters:
  - extraction_agent_id: str.

- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data

  - config: ExtractConfig. The configuration parameters for the extraction agent.
@@ -205,9 +188,8 @@ class LlamaExtractClient:
  "PUT",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -222,20 +204,10 @@ class LlamaExtractClient:
222
204
  raise ApiError(status_code=_response.status_code, body=_response.text)
223
205
  raise ApiError(status_code=_response.status_code, body=_response_json)
224
206
 
225
- def delete_extraction_agent(
226
- self,
227
- extraction_agent_id: str,
228
- *,
229
- project_id: typing.Optional[str] = None,
230
- organization_id: typing.Optional[str] = None,
231
- ) -> typing.Any:
207
+ def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
232
208
  """
233
209
  Parameters:
234
210
  - extraction_agent_id: str.
235
-
236
- - project_id: typing.Optional[str].
237
-
238
- - organization_id: typing.Optional[str].
239
211
  ---
240
212
  from llama_cloud.client import LlamaCloud
241
213
 
@@ -250,9 +222,8 @@ class LlamaExtractClient:
250
222
  "DELETE",
251
223
  urllib.parse.urljoin(
252
224
  f"{self._client_wrapper.get_base_url()}/",
253
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
225
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
254
226
  ),
255
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
256
227
  headers=self._client_wrapper.get_headers(),
257
228
  timeout=60,
258
229
  )
@@ -266,20 +237,10 @@ class LlamaExtractClient:
266
237
  raise ApiError(status_code=_response.status_code, body=_response.text)
267
238
  raise ApiError(status_code=_response.status_code, body=_response_json)
268
239
 
269
- def list_jobs(
270
- self,
271
- *,
272
- extraction_agent_id: str,
273
- project_id: typing.Optional[str] = None,
274
- organization_id: typing.Optional[str] = None,
275
- ) -> typing.List[ExtractJob]:
240
+ def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
276
241
  """
277
242
  Parameters:
278
243
  - extraction_agent_id: str.
279
-
280
- - project_id: typing.Optional[str].
281
-
282
- - organization_id: typing.Optional[str].
283
244
  ---
284
245
  from llama_cloud.client import LlamaCloud
285
246
 
@@ -293,13 +254,7 @@ class LlamaExtractClient:
293
254
  _response = self._client_wrapper.httpx_client.request(
294
255
  "GET",
295
256
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
296
- params=remove_none_from_dict(
297
- {
298
- "extraction_agent_id": extraction_agent_id,
299
- "project_id": project_id,
300
- "organization_id": organization_id,
301
- }
302
- ),
257
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
303
258
  headers=self._client_wrapper.get_headers(),
304
259
  timeout=60,
305
260
  )
@@ -313,19 +268,9 @@ class LlamaExtractClient:
313
268
  raise ApiError(status_code=_response.status_code, body=_response.text)
314
269
  raise ApiError(status_code=_response.status_code, body=_response_json)
315
270
 
316
- def run_job(
317
- self,
318
- *,
319
- project_id: typing.Optional[str] = None,
320
- organization_id: typing.Optional[str] = None,
321
- request: ExtractJobCreate,
322
- ) -> ExtractJob:
271
+ def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
323
272
  """
324
273
  Parameters:
325
- - project_id: typing.Optional[str].
326
-
327
- - organization_id: typing.Optional[str].
328
-
329
274
  - request: ExtractJobCreate.
330
275
  ---
331
276
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -347,7 +292,6 @@ class LlamaExtractClient:
347
292
  _response = self._client_wrapper.httpx_client.request(
348
293
  "POST",
349
294
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
350
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
351
295
  json=jsonable_encoder(request),
352
296
  headers=self._client_wrapper.get_headers(),
353
297
  timeout=60,
@@ -362,16 +306,10 @@ class LlamaExtractClient:
362
306
  raise ApiError(status_code=_response.status_code, body=_response.text)
363
307
  raise ApiError(status_code=_response.status_code, body=_response_json)
364
308
 
365
- def get_job(
366
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
367
- ) -> ExtractJob:
309
+ def get_job(self, job_id: str) -> ExtractJob:
368
310
  """
369
311
  Parameters:
370
312
  - job_id: str.
371
-
372
- - project_id: typing.Optional[str].
373
-
374
- - organization_id: typing.Optional[str].
375
313
  ---
376
314
  from llama_cloud.client import LlamaCloud
377
315
 
@@ -385,7 +323,6 @@ class LlamaExtractClient:
385
323
  _response = self._client_wrapper.httpx_client.request(
386
324
  "GET",
387
325
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
388
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
389
326
  headers=self._client_wrapper.get_headers(),
390
327
  timeout=60,
391
328
  )
@@ -399,19 +336,60 @@ class LlamaExtractClient:
399
336
  raise ApiError(status_code=_response.status_code, body=_response.text)
400
337
  raise ApiError(status_code=_response.status_code, body=_response_json)
401
338
 
402
- def run_job_with_parsed_file(
403
- self,
404
- *,
405
- project_id: typing.Optional[str] = None,
406
- organization_id: typing.Optional[str] = None,
407
- request: ExtractJobCreate,
408
- ) -> typing.Optional[ExtractResultset]:
339
+ def run_job_with_parsed_file_test(
340
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
341
+ ) -> ExtractResultset:
409
342
  """
410
343
  Parameters:
411
- - project_id: typing.Optional[str].
344
+ - job_create: ExtractJobCreate.
412
345
 
413
- - organization_id: typing.Optional[str].
346
+ - extract_settings: typing.Optional[LlamaExtractSettings].
347
+ ---
348
+ from llama_cloud import (
349
+ ExtractConfig,
350
+ ExtractJobCreate,
351
+ ExtractMode,
352
+ LlamaExtractSettings,
353
+ )
354
+ from llama_cloud.client import LlamaCloud
355
+
356
+ client = LlamaCloud(
357
+ token="YOUR_TOKEN",
358
+ )
359
+ client.llama_extract.run_job_with_parsed_file_test(
360
+ job_create=ExtractJobCreate(
361
+ extraction_agent_id="string",
362
+ file_id="string",
363
+ config_override=ExtractConfig(
364
+ extraction_mode=ExtractMode.PER_DOC,
365
+ ),
366
+ ),
367
+ extract_settings=LlamaExtractSettings(),
368
+ )
369
+ """
370
+ _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
371
+ if extract_settings is not OMIT:
372
+ _request["extract_settings"] = extract_settings
373
+ _response = self._client_wrapper.httpx_client.request(
374
+ "POST",
375
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
376
+ json=jsonable_encoder(_request),
377
+ headers=self._client_wrapper.get_headers(),
378
+ timeout=60,
379
+ )
380
+ if 200 <= _response.status_code < 300:
381
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
382
+ if _response.status_code == 422:
383
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
384
+ try:
385
+ _response_json = _response.json()
386
+ except JSONDecodeError:
387
+ raise ApiError(status_code=_response.status_code, body=_response.text)
388
+ raise ApiError(status_code=_response.status_code, body=_response_json)
414
389
 
390
+ def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
391
+ """
392
+ Parameters:
415
393
  - request: ExtractJobCreate.
416
394
  ---
417
395
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -433,13 +411,12 @@ class LlamaExtractClient:
433
411
  _response = self._client_wrapper.httpx_client.request(
434
412
  "POST",
435
413
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
436
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
437
414
  json=jsonable_encoder(request),
438
415
  headers=self._client_wrapper.get_headers(),
439
416
  timeout=60,
440
417
  )
441
418
  if 200 <= _response.status_code < 300:
442
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
419
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
443
420
  if _response.status_code == 422:
444
421
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
445
422
  try:
@@ -448,20 +425,60 @@ class LlamaExtractClient:
448
425
  raise ApiError(status_code=_response.status_code, body=_response.text)
449
426
  raise ApiError(status_code=_response.status_code, body=_response_json)
450
427
 
451
- def run_jobs_in_batch(
452
- self,
453
- *,
454
- project_id: typing.Optional[str] = None,
455
- organization_id: typing.Optional[str] = None,
456
- extraction_agent_id: str,
457
- file_ids: typing.List[str],
458
- ) -> typing.List[ExtractJob]:
428
+ def run_job_test_user(
429
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
430
+ ) -> ExtractJob:
459
431
  """
460
432
  Parameters:
461
- - project_id: typing.Optional[str].
433
+ - job_create: ExtractJobCreate.
462
434
 
463
- - organization_id: typing.Optional[str].
435
+ - extract_settings: typing.Optional[LlamaExtractSettings].
436
+ ---
437
+ from llama_cloud import (
438
+ ExtractConfig,
439
+ ExtractJobCreate,
440
+ ExtractMode,
441
+ LlamaExtractSettings,
442
+ )
443
+ from llama_cloud.client import LlamaCloud
444
+
445
+ client = LlamaCloud(
446
+ token="YOUR_TOKEN",
447
+ )
448
+ client.llama_extract.run_job_test_user(
449
+ job_create=ExtractJobCreate(
450
+ extraction_agent_id="string",
451
+ file_id="string",
452
+ config_override=ExtractConfig(
453
+ extraction_mode=ExtractMode.PER_DOC,
454
+ ),
455
+ ),
456
+ extract_settings=LlamaExtractSettings(),
457
+ )
458
+ """
459
+ _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
460
+ if extract_settings is not OMIT:
461
+ _request["extract_settings"] = extract_settings
462
+ _response = self._client_wrapper.httpx_client.request(
463
+ "POST",
464
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
465
+ json=jsonable_encoder(_request),
466
+ headers=self._client_wrapper.get_headers(),
467
+ timeout=60,
468
+ )
469
+ if 200 <= _response.status_code < 300:
470
+ return pydantic.parse_obj_as(ExtractJob, _response.json()) # type: ignore
471
+ if _response.status_code == 422:
472
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
473
+ try:
474
+ _response_json = _response.json()
475
+ except JSONDecodeError:
476
+ raise ApiError(status_code=_response.status_code, body=_response.text)
477
+ raise ApiError(status_code=_response.status_code, body=_response_json)
464
478
 
479
+ def run_jobs_in_batch(self, *, extraction_agent_id: str, file_ids: typing.List[str]) -> typing.List[ExtractJob]:
480
+ """
481
+ Parameters:
465
482
  - extraction_agent_id: str. The id of the extraction agent
466
483
 
467
484
  - file_ids: typing.List[str]. The ids of the files
@@ -479,7 +496,6 @@ class LlamaExtractClient:
479
496
  _response = self._client_wrapper.httpx_client.request(
480
497
  "POST",
481
498
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
482
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
483
499
  json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
484
500
  headers=self._client_wrapper.get_headers(),
485
501
  timeout=60,
@@ -494,16 +510,10 @@ class LlamaExtractClient:
494
510
  raise ApiError(status_code=_response.status_code, body=_response.text)
495
511
  raise ApiError(status_code=_response.status_code, body=_response_json)
496
512
 
497
- def get_job_result(
498
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
499
- ) -> ExtractResultset:
513
+ def get_job_result(self, job_id: str) -> ExtractResultset:
500
514
  """
501
515
  Parameters:
502
516
  - job_id: str.
503
-
504
- - project_id: typing.Optional[str].
505
-
506
- - organization_id: typing.Optional[str].
507
517
  ---
508
518
  from llama_cloud.client import LlamaCloud
509
519
 
@@ -519,7 +529,6 @@ class LlamaExtractClient:
519
529
  urllib.parse.urljoin(
520
530
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
521
531
  ),
522
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
523
532
  headers=self._client_wrapper.get_headers(),
524
533
  timeout=60,
525
534
  )
@@ -533,56 +542,39 @@ class LlamaExtractClient:
533
542
  raise ApiError(status_code=_response.status_code, body=_response.text)
534
543
  raise ApiError(status_code=_response.status_code, body=_response_json)
535
544
 
536
-
537
- class AsyncLlamaExtractClient:
538
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
539
- self._client_wrapper = client_wrapper
540
-
541
- async def create_extraction_agent(
545
+ def list_extract_runs(
542
546
  self,
543
547
  *,
544
- project_id: typing.Optional[str] = None,
545
- organization_id: typing.Optional[str] = None,
546
- name: str,
547
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
548
- config: ExtractConfig,
549
- ) -> ExtractAgent:
548
+ extraction_agent_id: typing.Optional[str] = None,
549
+ run_id: typing.Optional[str] = None,
550
+ job_id: typing.Optional[str] = None,
551
+ ) -> typing.List[ExtractRun]:
550
552
  """
551
553
  Parameters:
552
- - project_id: typing.Optional[str].
554
+ - extraction_agent_id: typing.Optional[str].
553
555
 
554
- - organization_id: typing.Optional[str].
556
+ - run_id: typing.Optional[str].
555
557
 
556
- - name: str. The name of the extraction schema
557
-
558
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
559
-
560
- - config: ExtractConfig. The configuration parameters for the extraction agent.
558
+ - job_id: typing.Optional[str].
561
559
  ---
562
- from llama_cloud import ExtractConfig, ExtractMode
563
- from llama_cloud.client import AsyncLlamaCloud
560
+ from llama_cloud.client import LlamaCloud
564
561
 
565
- client = AsyncLlamaCloud(
562
+ client = LlamaCloud(
566
563
  token="YOUR_TOKEN",
567
564
  )
568
- await client.llama_extract.create_extraction_agent(
569
- name="string",
570
- data_schema={},
571
- config=ExtractConfig(
572
- extraction_mode=ExtractMode.PER_DOC,
573
- ),
574
- )
565
+ client.llama_extract.list_extract_runs()
575
566
  """
576
- _response = await self._client_wrapper.httpx_client.request(
577
- "POST",
578
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
579
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
580
- json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
567
+ _response = self._client_wrapper.httpx_client.request(
568
+ "GET",
569
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
570
+ params=remove_none_from_dict(
571
+ {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
572
+ ),
581
573
  headers=self._client_wrapper.get_headers(),
582
574
  timeout=60,
583
575
  )
584
576
  if 200 <= _response.status_code < 300:
585
- return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
577
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
586
578
  if _response.status_code == 422:
587
579
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
588
580
  try:
@@ -591,28 +583,31 @@ class AsyncLlamaExtractClient:
591
583
  raise ApiError(status_code=_response.status_code, body=_response.text)
592
584
  raise ApiError(status_code=_response.status_code, body=_response_json)
593
585
 
586
+
587
+ class AsyncLlamaExtractClient:
588
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
589
+ self._client_wrapper = client_wrapper
590
+
594
591
  async def list_extraction_agents(
595
- self, *, project_id: str, organization_id: typing.Optional[str] = None
592
+ self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
596
593
  ) -> typing.List[ExtractAgent]:
597
594
  """
598
595
  Parameters:
599
- - project_id: str.
596
+ - project_id: typing.Optional[str].
600
597
 
601
- - organization_id: typing.Optional[str].
598
+ - name: typing.Optional[str].
602
599
  ---
603
600
  from llama_cloud.client import AsyncLlamaCloud
604
601
 
605
602
  client = AsyncLlamaCloud(
606
603
  token="YOUR_TOKEN",
607
604
  )
608
- await client.llama_extract.list_extraction_agents(
609
- project_id="string",
610
- )
605
+ await client.llama_extract.list_extraction_agents()
611
606
  """
612
607
  _response = await self._client_wrapper.httpx_client.request(
613
608
  "GET",
614
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
615
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
609
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
610
+ params=remove_none_from_dict({"project_id": project_id, "name": name}),
616
611
  headers=self._client_wrapper.get_headers(),
617
612
  timeout=60,
618
613
  )
@@ -626,20 +621,63 @@ class AsyncLlamaExtractClient:
626
621
  raise ApiError(status_code=_response.status_code, body=_response.text)
627
622
  raise ApiError(status_code=_response.status_code, body=_response_json)
628
623
 
629
- async def get_extraction_agent(
624
+ async def create_extraction_agent(
630
625
  self,
631
- extraction_agent_id: str,
632
626
  *,
633
627
  project_id: typing.Optional[str] = None,
634
628
  organization_id: typing.Optional[str] = None,
629
+ name: str,
630
+ data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
631
+ config: ExtractConfig,
635
632
  ) -> ExtractAgent:
636
633
  """
637
634
  Parameters:
638
- - extraction_agent_id: str.
639
-
640
635
  - project_id: typing.Optional[str].
641
636
 
642
637
  - organization_id: typing.Optional[str].
638
+
639
+ - name: str. The name of the extraction schema
640
+
641
+ - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
642
+
643
+ - config: ExtractConfig. The configuration parameters for the extraction agent.
644
+ ---
645
+ from llama_cloud import ExtractConfig, ExtractMode
646
+ from llama_cloud.client import AsyncLlamaCloud
647
+
648
+ client = AsyncLlamaCloud(
649
+ token="YOUR_TOKEN",
650
+ )
651
+ await client.llama_extract.create_extraction_agent(
652
+ name="string",
653
+ data_schema={},
654
+ config=ExtractConfig(
655
+ extraction_mode=ExtractMode.PER_DOC,
656
+ ),
657
+ )
658
+ """
659
+ _response = await self._client_wrapper.httpx_client.request(
660
+ "POST",
661
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
662
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
663
+ json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
664
+ headers=self._client_wrapper.get_headers(),
665
+ timeout=60,
666
+ )
667
+ if 200 <= _response.status_code < 300:
668
+ return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
669
+ if _response.status_code == 422:
670
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
671
+ try:
672
+ _response_json = _response.json()
673
+ except JSONDecodeError:
674
+ raise ApiError(status_code=_response.status_code, body=_response.text)
675
+ raise ApiError(status_code=_response.status_code, body=_response_json)
676
+
677
+ async def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
678
+ """
679
+ Parameters:
680
+ - extraction_agent_id: str.
643
681
  ---
644
682
  from llama_cloud.client import AsyncLlamaCloud
645
683
 
@@ -654,9 +692,8 @@ class AsyncLlamaExtractClient:
654
692
  "GET",
655
693
  urllib.parse.urljoin(
656
694
  f"{self._client_wrapper.get_base_url()}/",
657
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
695
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
658
696
  ),
659
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
660
697
  headers=self._client_wrapper.get_headers(),
661
698
  timeout=60,
662
699
  )
@@ -674,8 +711,6 @@ class AsyncLlamaExtractClient:
674
711
  self,
675
712
  extraction_agent_id: str,
676
713
  *,
677
- project_id: typing.Optional[str] = None,
678
- organization_id: typing.Optional[str] = None,
679
714
  data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
680
715
  config: ExtractConfig,
681
716
  ) -> ExtractAgent:
@@ -683,10 +718,6 @@ class AsyncLlamaExtractClient:
683
718
  Parameters:
684
719
  - extraction_agent_id: str.
685
720
 
686
- - project_id: typing.Optional[str].
687
-
688
- - organization_id: typing.Optional[str].
689
-
690
721
  - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
691
722
 
692
723
  - config: ExtractConfig. The configuration parameters for the extraction agent.
@@ -709,9 +740,8 @@ class AsyncLlamaExtractClient:
709
740
  "PUT",
710
741
  urllib.parse.urljoin(
711
742
  f"{self._client_wrapper.get_base_url()}/",
712
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
743
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
713
744
  ),
714
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
715
745
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
716
746
  headers=self._client_wrapper.get_headers(),
717
747
  timeout=60,
@@ -726,20 +756,10 @@ class AsyncLlamaExtractClient:
726
756
  raise ApiError(status_code=_response.status_code, body=_response.text)
727
757
  raise ApiError(status_code=_response.status_code, body=_response_json)
728
758
 
729
- async def delete_extraction_agent(
730
- self,
731
- extraction_agent_id: str,
732
- *,
733
- project_id: typing.Optional[str] = None,
734
- organization_id: typing.Optional[str] = None,
735
- ) -> typing.Any:
759
+ async def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
736
760
  """
737
761
  Parameters:
738
762
  - extraction_agent_id: str.
739
-
740
- - project_id: typing.Optional[str].
741
-
742
- - organization_id: typing.Optional[str].
743
763
  ---
744
764
  from llama_cloud.client import AsyncLlamaCloud
745
765
 
@@ -754,9 +774,8 @@ class AsyncLlamaExtractClient:
754
774
  "DELETE",
755
775
  urllib.parse.urljoin(
756
776
  f"{self._client_wrapper.get_base_url()}/",
757
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
777
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
758
778
  ),
759
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
760
779
  headers=self._client_wrapper.get_headers(),
761
780
  timeout=60,
762
781
  )
@@ -770,20 +789,10 @@ class AsyncLlamaExtractClient:
770
789
  raise ApiError(status_code=_response.status_code, body=_response.text)
771
790
  raise ApiError(status_code=_response.status_code, body=_response_json)
772
791
 
773
- async def list_jobs(
774
- self,
775
- *,
776
- extraction_agent_id: str,
777
- project_id: typing.Optional[str] = None,
778
- organization_id: typing.Optional[str] = None,
779
- ) -> typing.List[ExtractJob]:
792
+ async def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
780
793
  """
781
794
  Parameters:
782
795
  - extraction_agent_id: str.
783
-
784
- - project_id: typing.Optional[str].
785
-
786
- - organization_id: typing.Optional[str].
787
796
  ---
788
797
  from llama_cloud.client import AsyncLlamaCloud
789
798
 
@@ -797,13 +806,7 @@ class AsyncLlamaExtractClient:
797
806
  _response = await self._client_wrapper.httpx_client.request(
798
807
  "GET",
799
808
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
800
- params=remove_none_from_dict(
801
- {
802
- "extraction_agent_id": extraction_agent_id,
803
- "project_id": project_id,
804
- "organization_id": organization_id,
805
- }
806
- ),
809
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
807
810
  headers=self._client_wrapper.get_headers(),
808
811
  timeout=60,
809
812
  )
@@ -817,19 +820,9 @@ class AsyncLlamaExtractClient:
817
820
  raise ApiError(status_code=_response.status_code, body=_response.text)
818
821
  raise ApiError(status_code=_response.status_code, body=_response_json)
819
822
 
820
- async def run_job(
821
- self,
822
- *,
823
- project_id: typing.Optional[str] = None,
824
- organization_id: typing.Optional[str] = None,
825
- request: ExtractJobCreate,
826
- ) -> ExtractJob:
823
+ async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
827
824
  """
828
825
  Parameters:
829
- - project_id: typing.Optional[str].
830
-
831
- - organization_id: typing.Optional[str].
832
-
833
826
  - request: ExtractJobCreate.
834
827
  ---
835
828
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -851,7 +844,6 @@ class AsyncLlamaExtractClient:
851
844
  _response = await self._client_wrapper.httpx_client.request(
852
845
  "POST",
853
846
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
854
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
855
847
  json=jsonable_encoder(request),
856
848
  headers=self._client_wrapper.get_headers(),
857
849
  timeout=60,
@@ -866,16 +858,10 @@ class AsyncLlamaExtractClient:
866
858
  raise ApiError(status_code=_response.status_code, body=_response.text)
867
859
  raise ApiError(status_code=_response.status_code, body=_response_json)
868
860
 
869
- async def get_job(
870
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
871
- ) -> ExtractJob:
861
+ async def get_job(self, job_id: str) -> ExtractJob:
872
862
  """
873
863
  Parameters:
874
864
  - job_id: str.
875
-
876
- - project_id: typing.Optional[str].
877
-
878
- - organization_id: typing.Optional[str].
879
865
  ---
880
866
  from llama_cloud.client import AsyncLlamaCloud
881
867
 
@@ -889,7 +875,6 @@ class AsyncLlamaExtractClient:
889
875
  _response = await self._client_wrapper.httpx_client.request(
890
876
  "GET",
891
877
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
892
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
893
878
  headers=self._client_wrapper.get_headers(),
894
879
  timeout=60,
895
880
  )
@@ -903,19 +888,60 @@ class AsyncLlamaExtractClient:
903
888
  raise ApiError(status_code=_response.status_code, body=_response.text)
904
889
  raise ApiError(status_code=_response.status_code, body=_response_json)
905
890
 
906
- async def run_job_with_parsed_file(
907
- self,
908
- *,
909
- project_id: typing.Optional[str] = None,
910
- organization_id: typing.Optional[str] = None,
911
- request: ExtractJobCreate,
912
- ) -> typing.Optional[ExtractResultset]:
891
+ async def run_job_with_parsed_file_test(
892
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
893
+ ) -> ExtractResultset:
913
894
  """
914
895
  Parameters:
915
- - project_id: typing.Optional[str].
896
+ - job_create: ExtractJobCreate.
916
897
 
917
- - organization_id: typing.Optional[str].
898
+ - extract_settings: typing.Optional[LlamaExtractSettings].
899
+ ---
900
+ from llama_cloud import (
901
+ ExtractConfig,
902
+ ExtractJobCreate,
903
+ ExtractMode,
904
+ LlamaExtractSettings,
905
+ )
906
+ from llama_cloud.client import AsyncLlamaCloud
907
+
908
+ client = AsyncLlamaCloud(
909
+ token="YOUR_TOKEN",
910
+ )
911
+ await client.llama_extract.run_job_with_parsed_file_test(
912
+ job_create=ExtractJobCreate(
913
+ extraction_agent_id="string",
914
+ file_id="string",
915
+ config_override=ExtractConfig(
916
+ extraction_mode=ExtractMode.PER_DOC,
917
+ ),
918
+ ),
919
+ extract_settings=LlamaExtractSettings(),
920
+ )
921
+ """
922
+ _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
923
+ if extract_settings is not OMIT:
924
+ _request["extract_settings"] = extract_settings
925
+ _response = await self._client_wrapper.httpx_client.request(
926
+ "POST",
927
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
928
+ json=jsonable_encoder(_request),
929
+ headers=self._client_wrapper.get_headers(),
930
+ timeout=60,
931
+ )
932
+ if 200 <= _response.status_code < 300:
933
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
934
+ if _response.status_code == 422:
935
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
936
+ try:
937
+ _response_json = _response.json()
938
+ except JSONDecodeError:
939
+ raise ApiError(status_code=_response.status_code, body=_response.text)
940
+ raise ApiError(status_code=_response.status_code, body=_response_json)
918
941
 
942
+ async def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
943
+ """
944
+ Parameters:
919
945
  - request: ExtractJobCreate.
920
946
  ---
921
947
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -937,13 +963,12 @@ class AsyncLlamaExtractClient:
937
963
  _response = await self._client_wrapper.httpx_client.request(
938
964
  "POST",
939
965
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
940
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
941
966
  json=jsonable_encoder(request),
942
967
  headers=self._client_wrapper.get_headers(),
943
968
  timeout=60,
944
969
  )
945
970
  if 200 <= _response.status_code < 300:
946
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
971
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
947
972
  if _response.status_code == 422:
948
973
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
949
974
  try:
@@ -952,20 +977,62 @@ class AsyncLlamaExtractClient:
952
977
  raise ApiError(status_code=_response.status_code, body=_response.text)
953
978
  raise ApiError(status_code=_response.status_code, body=_response_json)
954
979
 
955
- async def run_jobs_in_batch(
956
- self,
957
- *,
958
- project_id: typing.Optional[str] = None,
959
- organization_id: typing.Optional[str] = None,
960
- extraction_agent_id: str,
961
- file_ids: typing.List[str],
962
- ) -> typing.List[ExtractJob]:
980
+ async def run_job_test_user(
981
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
982
+ ) -> ExtractJob:
963
983
  """
964
984
  Parameters:
965
- - project_id: typing.Optional[str].
985
+ - job_create: ExtractJobCreate.
966
986
 
967
- - organization_id: typing.Optional[str].
987
+ - extract_settings: typing.Optional[LlamaExtractSettings].
988
+ ---
989
+ from llama_cloud import (
990
+ ExtractConfig,
991
+ ExtractJobCreate,
992
+ ExtractMode,
993
+ LlamaExtractSettings,
994
+ )
995
+ from llama_cloud.client import AsyncLlamaCloud
968
996
 
997
+ client = AsyncLlamaCloud(
998
+ token="YOUR_TOKEN",
999
+ )
1000
+ await client.llama_extract.run_job_test_user(
1001
+ job_create=ExtractJobCreate(
1002
+ extraction_agent_id="string",
1003
+ file_id="string",
1004
+ config_override=ExtractConfig(
1005
+ extraction_mode=ExtractMode.PER_DOC,
1006
+ ),
1007
+ ),
1008
+ extract_settings=LlamaExtractSettings(),
1009
+ )
1010
+ """
1011
+ _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
1012
+ if extract_settings is not OMIT:
1013
+ _request["extract_settings"] = extract_settings
1014
+ _response = await self._client_wrapper.httpx_client.request(
1015
+ "POST",
1016
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
1017
+ json=jsonable_encoder(_request),
1018
+ headers=self._client_wrapper.get_headers(),
1019
+ timeout=60,
1020
+ )
1021
+ if 200 <= _response.status_code < 300:
1022
+ return pydantic.parse_obj_as(ExtractJob, _response.json()) # type: ignore
1023
+ if _response.status_code == 422:
1024
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1025
+ try:
1026
+ _response_json = _response.json()
1027
+ except JSONDecodeError:
1028
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1029
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1030
+
1031
+ async def run_jobs_in_batch(
1032
+ self, *, extraction_agent_id: str, file_ids: typing.List[str]
1033
+ ) -> typing.List[ExtractJob]:
1034
+ """
1035
+ Parameters:
969
1036
  - extraction_agent_id: str. The id of the extraction agent
970
1037
 
971
1038
  - file_ids: typing.List[str]. The ids of the files
@@ -983,7 +1050,6 @@ class AsyncLlamaExtractClient:
983
1050
  _response = await self._client_wrapper.httpx_client.request(
984
1051
  "POST",
985
1052
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
986
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
987
1053
  json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
988
1054
  headers=self._client_wrapper.get_headers(),
989
1055
  timeout=60,
@@ -998,16 +1064,10 @@ class AsyncLlamaExtractClient:
998
1064
  raise ApiError(status_code=_response.status_code, body=_response.text)
999
1065
  raise ApiError(status_code=_response.status_code, body=_response_json)
1000
1066
 
1001
- async def get_job_result(
1002
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1003
- ) -> ExtractResultset:
1067
+ async def get_job_result(self, job_id: str) -> ExtractResultset:
1004
1068
  """
1005
1069
  Parameters:
1006
1070
  - job_id: str.
1007
-
1008
- - project_id: typing.Optional[str].
1009
-
1010
- - organization_id: typing.Optional[str].
1011
1071
  ---
1012
1072
  from llama_cloud.client import AsyncLlamaCloud
1013
1073
 
@@ -1023,7 +1083,6 @@ class AsyncLlamaExtractClient:
1023
1083
  urllib.parse.urljoin(
1024
1084
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
1025
1085
  ),
1026
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1027
1086
  headers=self._client_wrapper.get_headers(),
1028
1087
  timeout=60,
1029
1088
  )
@@ -1036,3 +1095,44 @@ class AsyncLlamaExtractClient:
1036
1095
  except JSONDecodeError:
1037
1096
  raise ApiError(status_code=_response.status_code, body=_response.text)
1038
1097
  raise ApiError(status_code=_response.status_code, body=_response_json)
1098
+
1099
+ async def list_extract_runs(
1100
+ self,
1101
+ *,
1102
+ extraction_agent_id: typing.Optional[str] = None,
1103
+ run_id: typing.Optional[str] = None,
1104
+ job_id: typing.Optional[str] = None,
1105
+ ) -> typing.List[ExtractRun]:
1106
+ """
1107
+ Parameters:
1108
+ - extraction_agent_id: typing.Optional[str].
1109
+
1110
+ - run_id: typing.Optional[str].
1111
+
1112
+ - job_id: typing.Optional[str].
1113
+ ---
1114
+ from llama_cloud.client import AsyncLlamaCloud
1115
+
1116
+ client = AsyncLlamaCloud(
1117
+ token="YOUR_TOKEN",
1118
+ )
1119
+ await client.llama_extract.list_extract_runs()
1120
+ """
1121
+ _response = await self._client_wrapper.httpx_client.request(
1122
+ "GET",
1123
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
1124
+ params=remove_none_from_dict(
1125
+ {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
1126
+ ),
1127
+ headers=self._client_wrapper.get_headers(),
1128
+ timeout=60,
1129
+ )
1130
+ if 200 <= _response.status_code < 300:
1131
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
1132
+ if _response.status_code == 422:
1133
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1134
+ try:
1135
+ _response_json = _response.json()
1136
+ except JSONDecodeError:
1137
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1138
+ raise ApiError(status_code=_response.status_code, body=_response_json)