llama-cloud 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (27)
  1. llama_cloud/__init__.py +4 -16
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -5
  4. llama_cloud/resources/files/client.py +34 -6
  5. llama_cloud/resources/llama_extract/client.py +126 -424
  6. llama_cloud/resources/parsing/client.py +82 -18
  7. llama_cloud/types/__init__.py +4 -10
  8. llama_cloud/types/extract_job.py +3 -1
  9. llama_cloud/types/extract_resultset.py +2 -6
  10. llama_cloud/types/extract_run.py +5 -0
  11. llama_cloud/types/extract_run_data_value.py +5 -0
  12. llama_cloud/types/{extraction_schema_data_schema_value.py → extract_run_extraction_metadata_value.py} +1 -1
  13. llama_cloud/types/extract_state.py +4 -4
  14. llama_cloud/types/llama_parse_parameters.py +3 -0
  15. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/METADATA +2 -1
  16. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/RECORD +18 -26
  17. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/WHEEL +1 -1
  18. llama_cloud/resources/extraction/__init__.py +0 -5
  19. llama_cloud/resources/extraction/client.py +0 -756
  20. llama_cloud/resources/extraction/types/__init__.py +0 -6
  21. llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py +0 -7
  22. llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +0 -7
  23. llama_cloud/types/extraction_job.py +0 -35
  24. llama_cloud/types/extraction_result.py +0 -44
  25. llama_cloud/types/extraction_result_data_value.py +0 -5
  26. llama_cloud/types/extraction_schema.py +0 -41
  27. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/LICENSE +0 -0
@@ -36,81 +36,26 @@ class LlamaExtractClient:
  def __init__(self, *, client_wrapper: SyncClientWrapper):
  self._client_wrapper = client_wrapper
 
- def create_extraction_agent(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- name: str,
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
- config: ExtractConfig,
- ) -> ExtractAgent:
- """
- Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
- - name: str. The name of the extraction schema
-
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
- ---
- from llama_cloud import ExtractConfig, ExtractMode
- from llama_cloud.client import LlamaCloud
-
- client = LlamaCloud(
- token="YOUR_TOKEN",
- )
- client.llama_extract.create_extraction_agent(
- name="string",
- data_schema={},
- config=ExtractConfig(
- extraction_mode=ExtractMode.PER_DOC,
- ),
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  def list_extraction_agents(
- self, *, project_id: str, organization_id: typing.Optional[str] = None
+ self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
  ) -> typing.List[ExtractAgent]:
  """
  Parameters:
- - project_id: str.
+ - project_id: typing.Optional[str].
 
- - organization_id: typing.Optional[str].
+ - name: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.llama_extract.list_extraction_agents(
- project_id="string",
- )
+ client.llama_extract.list_extraction_agents()
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ params=remove_none_from_dict({"project_id": project_id, "name": name}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
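The hunk above makes project_id optional on list_extraction_agents, adds a name filter, and moves the route from extraction_agents to extraction-agents. A minimal sketch of the new call shape against 0.1.9 (the agent name is a made-up example value):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# project_id is now optional, so listing everything needs no arguments.
all_agents = client.llama_extract.list_extraction_agents()

# The new name filter narrows the result server-side; "invoice-parser" is only an example.
named_agents = client.llama_extract.list_extraction_agents(name="invoice-parser")
```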
@@ -124,32 +69,46 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def get_extraction_agent_by_name(
- self, *, name: str, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+ def create_extraction_agent(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ name: str,
+ data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
+ config: ExtractConfig,
  ) -> ExtractAgent:
  """
  Parameters:
- - name: str.
-
  - project_id: typing.Optional[str].
 
  - organization_id: typing.Optional[str].
+
+ - name: str. The name of the extraction schema
+
+ - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
+
+ - config: ExtractConfig. The configuration parameters for the extraction agent.
  ---
+ from llama_cloud import ExtractConfig, ExtractMode
  from llama_cloud.client import LlamaCloud
 
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.llama_extract.get_extraction_agent_by_name(
+ client.llama_extract.create_extraction_agent(
  name="string",
+ data_schema={},
+ config=ExtractConfig(
+ extraction_mode=ExtractMode.PER_DOC,
+ ),
  )
  """
  _response = self._client_wrapper.httpx_client.request(
- "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents/by_name"
- ),
- params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
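This hunk removes get_extraction_agent_by_name and repoints create_extraction_agent at POST api/v1/extractionv2/extraction-agents. A hedged migration sketch for by-name lookups, assuming the name filter on the list endpoint returns exact matches (that behaviour is not confirmed by the diff):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Possible replacement for the removed get_extraction_agent_by_name: filter the
# list endpoint by name, then fetch the full agent by id.
matches = client.llama_extract.list_extraction_agents(name="invoice-parser")
agent = client.llama_extract.get_extraction_agent(matches[0].id) if matches else None
```

The .id attribute on ExtractAgent is assumed here; the model definition is not part of this diff.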
@@ -163,20 +122,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def get_extraction_agent(
- self,
- extraction_agent_id: str,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> ExtractAgent:
+ def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
@@ -191,9 +140,8 @@ class LlamaExtractClient:
  "GET",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -211,8 +159,6 @@ class LlamaExtractClient:
  self,
  extraction_agent_id: str,
  *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
  data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
  config: ExtractConfig,
  ) -> ExtractAgent:
@@ -220,10 +166,6 @@ class LlamaExtractClient:
  Parameters:
  - extraction_agent_id: str.
 
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
 
  - config: ExtractConfig. The configuration parameters for the extraction agent.
@@ -246,9 +188,8 @@ class LlamaExtractClient:
  "PUT",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -263,20 +204,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def delete_extraction_agent(
- self,
- extraction_agent_id: str,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> typing.Any:
+ def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
@@ -291,9 +222,8 @@ class LlamaExtractClient:
  "DELETE",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -307,20 +237,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def list_jobs(
- self,
- *,
- extraction_agent_id: str,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> typing.List[ExtractJob]:
+ def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
@@ -334,13 +254,7 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
- params=remove_none_from_dict(
- {
- "extraction_agent_id": extraction_agent_id,
- "project_id": project_id,
- "organization_id": organization_id,
- }
- ),
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -354,19 +268,9 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def run_job(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- request: ExtractJobCreate,
- ) -> ExtractJob:
+ def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - request: ExtractJobCreate.
  ---
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -388,7 +292,6 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -403,16 +306,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def get_job(
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractJob:
+ def get_job(self, job_id: str) -> ExtractJob:
  """
  Parameters:
  - job_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
@@ -426,7 +323,6 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
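In the hunks above, run_job and get_job lose their project_id and organization_id parameters; jobs are scoped entirely by the extraction agent referenced in the request. A sketch of the submit-then-poll flow in 0.1.9 (the ExtractJobCreate field names and the id attribute on ExtractJob are assumptions, since those models are not shown in this diff):

```python
from llama_cloud import ExtractJobCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# extraction_agent_id and file_id are assumed field names on ExtractJobCreate.
job = client.llama_extract.run_job(
    request=ExtractJobCreate(
        extraction_agent_id="YOUR_AGENT_ID",
        file_id="YOUR_FILE_ID",
    )
)

# Poll the job by id; project/organization query parameters are no longer sent.
refreshed = client.llama_extract.get_job(job.id)  # `id` on ExtractJob is assumed
```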
@@ -441,19 +337,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  def run_job_with_parsed_file_test(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- job_create: ExtractJobCreate,
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
- ) -> typing.Optional[ExtractResultset]:
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+ ) -> ExtractResultset:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - job_create: ExtractJobCreate.
 
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -486,13 +373,12 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -501,19 +387,9 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def run_job_with_parsed_file(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- request: ExtractJobCreate,
- ) -> typing.Optional[ExtractResultset]:
+ def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - request: ExtractJobCreate.
  ---
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -535,13 +411,12 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -551,19 +426,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  def run_job_test_user(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- job_create: ExtractJobCreate,
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
  ) -> ExtractJob:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - job_create: ExtractJobCreate.
 
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -596,7 +462,6 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -611,20 +476,9 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def run_jobs_in_batch(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- extraction_agent_id: str,
- file_ids: typing.List[str],
- ) -> typing.List[ExtractJob]:
+ def run_jobs_in_batch(self, *, extraction_agent_id: str, file_ids: typing.List[str]) -> typing.List[ExtractJob]:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - extraction_agent_id: str. The id of the extraction agent
 
  - file_ids: typing.List[str]. The ids of the files
@@ -642,7 +496,6 @@ class LlamaExtractClient:
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -657,16 +510,10 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def get_job_result(
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractResultset:
+ def get_job_result(self, job_id: str) -> ExtractResultset:
  """
  Parameters:
  - job_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
@@ -682,7 +529,6 @@ class LlamaExtractClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
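The hunks above keep the required parameters on the batch and result endpoints but drop the project/organization query string, and the parsed-file runners now return ExtractResultset instead of Optional[ExtractResultset], so callers no longer need a None check on the return value. A sketch of batch submission plus result retrieval, with the id attribute on ExtractJob assumed and placeholder file ids:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# file_ids would come from the files API; these are placeholder values.
jobs = client.llama_extract.run_jobs_in_batch(
    extraction_agent_id="YOUR_AGENT_ID",
    file_ids=["file-id-1", "file-id-2"],
)

# Fetch each job's result set once it completes.
results = [client.llama_extract.get_job_result(job.id) for job in jobs]
```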
@@ -696,35 +542,39 @@ class LlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- def get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
- self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractRun:
+ def list_extract_runs(
+ self,
+ *,
+ extraction_agent_id: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = None,
+ job_id: typing.Optional[str] = None,
+ ) -> typing.List[ExtractRun]:
  """
  Parameters:
- - run_id: str.
+ - extraction_agent_id: typing.Optional[str].
 
- - project_id: typing.Optional[str].
+ - run_id: typing.Optional[str].
 
- - organization_id: typing.Optional[str].
+ - job_id: typing.Optional[str].
  ---
  from llama_cloud.client import LlamaCloud
 
  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.llama_extract.get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
- run_id="string",
- )
+ client.llama_extract.list_extract_runs()
  """
  _response = self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+ params=remove_none_from_dict(
+ {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractRun, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
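The awkwardly named get_extract_run_api_v_1_extractionv_2_runs_run_id_get is replaced by list_extract_runs, which queries api/v1/extractionv2/runs with optional extraction_agent_id, run_id, and job_id filters and returns a list. A sketch of looking up the run for a known job, assuming at most one run matches a job_id (not guaranteed by the diff):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Filter runs by the job that produced them; run_id= or extraction_agent_id=
# work the same way.
runs = client.llama_extract.list_extract_runs(job_id="YOUR_JOB_ID")
run = runs[0] if runs else None
```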
@@ -738,81 +588,26 @@ class AsyncLlamaExtractClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
  self._client_wrapper = client_wrapper
 
- async def create_extraction_agent(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- name: str,
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
- config: ExtractConfig,
- ) -> ExtractAgent:
- """
- Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
- - name: str. The name of the extraction schema
-
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
-
- - config: ExtractConfig. The configuration parameters for the extraction agent.
- ---
- from llama_cloud import ExtractConfig, ExtractMode
- from llama_cloud.client import AsyncLlamaCloud
-
- client = AsyncLlamaCloud(
- token="YOUR_TOKEN",
- )
- await client.llama_extract.create_extraction_agent(
- name="string",
- data_schema={},
- config=ExtractConfig(
- extraction_mode=ExtractMode.PER_DOC,
- ),
- )
- """
- _response = await self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractAgent, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  async def list_extraction_agents(
- self, *, project_id: str, organization_id: typing.Optional[str] = None
+ self, *, project_id: typing.Optional[str] = None, name: typing.Optional[str] = None
  ) -> typing.List[ExtractAgent]:
  """
  Parameters:
- - project_id: str.
+ - project_id: typing.Optional[str].
 
- - organization_id: typing.Optional[str].
+ - name: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.llama_extract.list_extraction_agents(
- project_id="string",
- )
+ await client.llama_extract.list_extraction_agents()
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ params=remove_none_from_dict({"project_id": project_id, "name": name}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -826,32 +621,46 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_extraction_agent_by_name(
- self, *, name: str, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+ async def create_extraction_agent(
+ self,
+ *,
+ project_id: typing.Optional[str] = None,
+ organization_id: typing.Optional[str] = None,
+ name: str,
+ data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
+ config: ExtractConfig,
  ) -> ExtractAgent:
  """
  Parameters:
- - name: str.
-
  - project_id: typing.Optional[str].
 
  - organization_id: typing.Optional[str].
+
+ - name: str. The name of the extraction schema
+
+ - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
+
+ - config: ExtractConfig. The configuration parameters for the extraction agent.
  ---
+ from llama_cloud import ExtractConfig, ExtractMode
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.llama_extract.get_extraction_agent_by_name(
+ await client.llama_extract.create_extraction_agent(
  name="string",
+ data_schema={},
+ config=ExtractConfig(
+ extraction_mode=ExtractMode.PER_DOC,
+ ),
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
- "GET",
- urllib.parse.urljoin(
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents/by_name"
- ),
- params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
+ params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -865,20 +674,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_extraction_agent(
- self,
- extraction_agent_id: str,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> ExtractAgent:
+ async def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -893,9 +692,8 @@ class AsyncLlamaExtractClient:
  "GET",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -913,8 +711,6 @@ class AsyncLlamaExtractClient:
  self,
  extraction_agent_id: str,
  *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
  data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
  config: ExtractConfig,
  ) -> ExtractAgent:
@@ -922,10 +718,6 @@ class AsyncLlamaExtractClient:
  Parameters:
  - extraction_agent_id: str.
 
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
 
  - config: ExtractConfig. The configuration parameters for the extraction agent.
@@ -948,9 +740,8 @@ class AsyncLlamaExtractClient:
  "PUT",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -965,20 +756,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def delete_extraction_agent(
- self,
- extraction_agent_id: str,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> typing.Any:
+ async def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -993,9 +774,8 @@ class AsyncLlamaExtractClient:
  "DELETE",
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/",
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1009,20 +789,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def list_jobs(
- self,
- *,
- extraction_agent_id: str,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- ) -> typing.List[ExtractJob]:
+ async def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
  """
  Parameters:
  - extraction_agent_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -1036,13 +806,7 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
- params=remove_none_from_dict(
- {
- "extraction_agent_id": extraction_agent_id,
- "project_id": project_id,
- "organization_id": organization_id,
- }
- ),
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1056,19 +820,9 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def run_job(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- request: ExtractJobCreate,
- ) -> ExtractJob:
+ async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - request: ExtractJobCreate.
  ---
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -1090,7 +844,6 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1105,16 +858,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_job(
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractJob:
+ async def get_job(self, job_id: str) -> ExtractJob:
  """
  Parameters:
  - job_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -1128,7 +875,6 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1143,19 +889,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  async def run_job_with_parsed_file_test(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- job_create: ExtractJobCreate,
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
- ) -> typing.Optional[ExtractResultset]:
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+ ) -> ExtractResultset:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - job_create: ExtractJobCreate.
 
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1188,13 +925,12 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -1203,19 +939,9 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def run_job_with_parsed_file(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- request: ExtractJobCreate,
- ) -> typing.Optional[ExtractResultset]:
+ async def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - request: ExtractJobCreate.
  ---
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -1237,13 +963,12 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -1253,19 +978,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  async def run_job_test_user(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- job_create: ExtractJobCreate,
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
  ) -> ExtractJob:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - job_create: ExtractJobCreate.
 
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1298,7 +1014,6 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1314,19 +1029,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
  async def run_jobs_in_batch(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- extraction_agent_id: str,
- file_ids: typing.List[str],
+ self, *, extraction_agent_id: str, file_ids: typing.List[str]
  ) -> typing.List[ExtractJob]:
  """
  Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
  - extraction_agent_id: str. The id of the extraction agent
 
  - file_ids: typing.List[str]. The ids of the files
@@ -1344,7 +1050,6 @@ class AsyncLlamaExtractClient:
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -1359,16 +1064,10 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_job_result(
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractResultset:
+ async def get_job_result(self, job_id: str) -> ExtractResultset:
  """
  Parameters:
  - job_id: str.
-
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
@@ -1384,7 +1083,6 @@ class AsyncLlamaExtractClient:
  urllib.parse.urljoin(
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
  ),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -1398,35 +1096,39 @@ class AsyncLlamaExtractClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
 
- async def get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
- self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
- ) -> ExtractRun:
+ async def list_extract_runs(
+ self,
+ *,
+ extraction_agent_id: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = None,
+ job_id: typing.Optional[str] = None,
+ ) -> typing.List[ExtractRun]:
  """
  Parameters:
- - run_id: str.
+ - extraction_agent_id: typing.Optional[str].
 
- - project_id: typing.Optional[str].
+ - run_id: typing.Optional[str].
 
- - organization_id: typing.Optional[str].
+ - job_id: typing.Optional[str].
  ---
  from llama_cloud.client import AsyncLlamaCloud
 
  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.llama_extract.get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
- run_id="string",
- )
+ await client.llama_extract.list_extract_runs()
  """
  _response = await self._client_wrapper.httpx_client.request(
  "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
+ params=remove_none_from_dict(
+ {"extraction_agent_id": extraction_agent_id, "run_id": run_id, "job_id": job_id}
+ ),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(ExtractRun, _response.json()) # type: ignore
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
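The AsyncLlamaExtractClient changes mirror the synchronous client one-to-one. A sketch of the same listing and run-query flow with AsyncLlamaCloud (method names are taken from the diff; the id attribute on ExtractAgent is assumed):

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")

    # Same optional filters as the sync client, awaited.
    agents = await client.llama_extract.list_extraction_agents()
    if agents:
        runs = await client.llama_extract.list_extract_runs(
            extraction_agent_id=agents[0].id
        )
        print(f"{len(runs)} run(s) for agent {agents[0].id}")


asyncio.run(main())
```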