llama-cloud 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

This version of llama-cloud might be a problematic release.

Files changed (51)
  1. llama_cloud/__init__.py +32 -22
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +14 -8
  4. llama_cloud/resources/chat_apps/client.py +99 -133
  5. llama_cloud/resources/files/client.py +34 -6
  6. llama_cloud/resources/llama_extract/__init__.py +16 -2
  7. llama_cloud/resources/llama_extract/client.py +238 -366
  8. llama_cloud/resources/llama_extract/types/__init__.py +14 -3
  9. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
  11. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
  12. llama_cloud/resources/{extraction/types/extraction_schema_create_data_schema_value.py → llama_extract/types/extract_agent_update_data_schema_zero_value.py} +1 -1
  13. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
  14. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
  15. llama_cloud/resources/organizations/client.py +8 -12
  16. llama_cloud/resources/parsing/client.py +146 -18
  17. llama_cloud/resources/reports/client.py +30 -26
  18. llama_cloud/resources/retrievers/client.py +16 -4
  19. llama_cloud/types/__init__.py +20 -12
  20. llama_cloud/types/chat_app.py +11 -9
  21. llama_cloud/types/chat_app_response.py +12 -10
  22. llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
  23. llama_cloud/types/extract_job.py +3 -1
  24. llama_cloud/types/extract_job_create.py +4 -2
  25. llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
  26. llama_cloud/{resources/extraction/types/extraction_schema_update_data_schema_value.py → types/extract_job_create_data_schema_override_zero_value.py} +1 -1
  27. llama_cloud/types/extract_resultset.py +2 -6
  28. llama_cloud/types/extract_run.py +5 -0
  29. llama_cloud/types/extract_run_data.py +11 -0
  30. llama_cloud/types/extract_run_data_item_value.py +5 -0
  31. llama_cloud/types/extract_run_data_zero_value.py +5 -0
  32. llama_cloud/{resources/llama_extract/types/extract_agent_update_data_schema_value.py → types/extract_run_extraction_metadata_value.py} +1 -1
  33. llama_cloud/types/{extraction_job.py → extract_schema_validate_response.py} +3 -6
  34. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
  35. llama_cloud/types/extract_state.py +4 -4
  36. llama_cloud/types/llama_extract_settings.py +4 -0
  37. llama_cloud/types/llama_parse_parameters.py +11 -0
  38. llama_cloud/types/plan.py +4 -0
  39. llama_cloud/types/{extraction_result.py → preset_composite_retrieval_params.py} +5 -14
  40. llama_cloud/types/{extraction_schema.py → report_file_info.py} +5 -9
  41. llama_cloud/types/report_metadata.py +2 -1
  42. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/METADATA +2 -1
  43. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/RECORD +45 -42
  44. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/WHEEL +1 -1
  45. llama_cloud/resources/extraction/__init__.py +0 -5
  46. llama_cloud/resources/extraction/client.py +0 -756
  47. llama_cloud/resources/extraction/types/__init__.py +0 -6
  48. llama_cloud/types/extract_job_create_data_schema_override_value.py +0 -7
  49. llama_cloud/types/extraction_result_data_value.py +0 -5
  50. llama_cloud/types/extraction_schema_data_schema_value.py +0 -7
  51. {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.10.dist-info}/LICENSE +0 -0
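
Most of the changes rework the LlamaExtract client: the legacy extraction resource is removed, the extractionv2 endpoints move from underscored paths (extraction_agent, extraction_agents, by_name) to hyphenated ones (extraction-agents, by-name/{name}), most methods drop their project_id and organization_id query parameters, and schema-validation plus run-lookup methods are added. The following is a minimal usage sketch of the 0.1.10 surface as it appears in the diff below; the method names and keyword arguments are taken from the diff, while the identifier values, the ExtractJobCreate field names, and attribute access such as agent.id are illustrative assumptions.

    from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # New in 0.1.10: validate a schema before attaching it to an agent.
    validated = client.llama_extract.validate_extraction_schema(
        data_schema={"type": "object", "properties": {"total": {"type": "number"}}},
    )

    # Agent CRUD no longer accepts organization_id; project_id remains optional.
    agent = client.llama_extract.create_extraction_agent(
        name="invoices",
        data_schema=validated.data_schema,  # assumes the response echoes the normalized schema
        config=ExtractConfig(extraction_mode=ExtractMode.PER_DOC),
    )
    agents = client.llama_extract.list_extraction_agents()
    same_agent = client.llama_extract.get_extraction_agent_by_name("invoices")

    # Jobs and runs are now addressed purely by their ids.
    job = client.llama_extract.run_job(
        request=ExtractJobCreate(  # field names assumed; not shown in this part of the diff
            extraction_agent_id=agent.id,
            file_id="YOUR_FILE_ID",
        ),
    )
    result = client.llama_extract.get_job_result(job.id)
    run = client.llama_extract.get_run_by_job_id(job.id)
    runs = client.llama_extract.list_extract_runs(extraction_agent_id=agent.id)

Note that run_jobs_in_batch is removed without a direct replacement in this diff, and the generated get_extract_run_api_v_1_extractionv_2_runs_run_id_get helper is renamed to get_run.
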
@@ -15,10 +15,12 @@ from ...types.extract_job import ExtractJob
15
15
  from ...types.extract_job_create import ExtractJobCreate
16
16
  from ...types.extract_resultset import ExtractResultset
17
17
  from ...types.extract_run import ExtractRun
18
+ from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
18
19
  from ...types.http_validation_error import HttpValidationError
19
20
  from ...types.llama_extract_settings import LlamaExtractSettings
20
- from .types.extract_agent_create_data_schema_value import ExtractAgentCreateDataSchemaValue
21
- from .types.extract_agent_update_data_schema_value import ExtractAgentUpdateDataSchemaValue
21
+ from .types.extract_agent_create_data_schema import ExtractAgentCreateDataSchema
22
+ from .types.extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
23
+ from .types.extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
22
24
 
23
25
  try:
24
26
  import pydantic
@@ -36,13 +38,42 @@ class LlamaExtractClient:
36
38
  def __init__(self, *, client_wrapper: SyncClientWrapper):
37
39
  self._client_wrapper = client_wrapper
38
40
 
41
+ def list_extraction_agents(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractAgent]:
42
+ """
43
+ Parameters:
44
+ - project_id: typing.Optional[str].
45
+ ---
46
+ from llama_cloud.client import LlamaCloud
47
+
48
+ client = LlamaCloud(
49
+ token="YOUR_TOKEN",
50
+ )
51
+ client.llama_extract.list_extraction_agents()
52
+ """
53
+ _response = self._client_wrapper.httpx_client.request(
54
+ "GET",
55
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
56
+ params=remove_none_from_dict({"project_id": project_id}),
57
+ headers=self._client_wrapper.get_headers(),
58
+ timeout=60,
59
+ )
60
+ if 200 <= _response.status_code < 300:
61
+ return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
62
+ if _response.status_code == 422:
63
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
64
+ try:
65
+ _response_json = _response.json()
66
+ except JSONDecodeError:
67
+ raise ApiError(status_code=_response.status_code, body=_response.text)
68
+ raise ApiError(status_code=_response.status_code, body=_response_json)
69
+
39
70
  def create_extraction_agent(
40
71
  self,
41
72
  *,
42
73
  project_id: typing.Optional[str] = None,
43
74
  organization_id: typing.Optional[str] = None,
44
75
  name: str,
45
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
76
+ data_schema: ExtractAgentCreateDataSchema,
46
77
  config: ExtractConfig,
47
78
  ) -> ExtractAgent:
48
79
  """
@@ -53,7 +84,7 @@ class LlamaExtractClient:
53
84
 
54
85
  - name: str. The name of the extraction schema
55
86
 
56
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
87
+ - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
57
88
 
58
89
  - config: ExtractConfig. The configuration parameters for the extraction agent.
59
90
  ---
@@ -65,7 +96,6 @@ class LlamaExtractClient:
65
96
  )
66
97
  client.llama_extract.create_extraction_agent(
67
98
  name="string",
68
- data_schema={},
69
99
  config=ExtractConfig(
70
100
  extraction_mode=ExtractMode.PER_DOC,
71
101
  ),
@@ -73,7 +103,7 @@ class LlamaExtractClient:
73
103
  """
74
104
  _response = self._client_wrapper.httpx_client.request(
75
105
  "POST",
76
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
106
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
77
107
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
78
108
  json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
79
109
  headers=self._client_wrapper.get_headers(),
@@ -89,33 +119,34 @@ class LlamaExtractClient:
89
119
  raise ApiError(status_code=_response.status_code, body=_response.text)
90
120
  raise ApiError(status_code=_response.status_code, body=_response_json)
91
121
 
92
- def list_extraction_agents(
93
- self, *, project_id: str, organization_id: typing.Optional[str] = None
94
- ) -> typing.List[ExtractAgent]:
122
+ def validate_extraction_schema(
123
+ self, *, data_schema: ExtractSchemaValidateRequestDataSchema
124
+ ) -> ExtractSchemaValidateResponse:
95
125
  """
96
- Parameters:
97
- - project_id: str.
126
+ Validates an extraction agent's schema definition.
127
+ Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.
98
128
 
99
- - organization_id: typing.Optional[str].
129
+ Parameters:
130
+ - data_schema: ExtractSchemaValidateRequestDataSchema.
100
131
  ---
101
132
  from llama_cloud.client import LlamaCloud
102
133
 
103
134
  client = LlamaCloud(
104
135
  token="YOUR_TOKEN",
105
136
  )
106
- client.llama_extract.list_extraction_agents(
107
- project_id="string",
108
- )
137
+ client.llama_extract.validate_extraction_schema()
109
138
  """
110
139
  _response = self._client_wrapper.httpx_client.request(
111
- "GET",
112
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
113
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
140
+ "POST",
141
+ urllib.parse.urljoin(
142
+ f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
143
+ ),
144
+ json=jsonable_encoder({"data_schema": data_schema}),
114
145
  headers=self._client_wrapper.get_headers(),
115
146
  timeout=60,
116
147
  )
117
148
  if 200 <= _response.status_code < 300:
118
- return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
149
+ return pydantic.parse_obj_as(ExtractSchemaValidateResponse, _response.json()) # type: ignore
119
150
  if _response.status_code == 422:
120
151
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
121
152
  try:
@@ -124,16 +155,12 @@ class LlamaExtractClient:
124
155
  raise ApiError(status_code=_response.status_code, body=_response.text)
125
156
  raise ApiError(status_code=_response.status_code, body=_response_json)
126
157
 
127
- def get_extraction_agent_by_name(
128
- self, *, name: str, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
129
- ) -> ExtractAgent:
158
+ def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
130
159
  """
131
160
  Parameters:
132
161
  - name: str.
133
162
 
134
163
  - project_id: typing.Optional[str].
135
-
136
- - organization_id: typing.Optional[str].
137
164
  ---
138
165
  from llama_cloud.client import LlamaCloud
139
166
 
@@ -147,9 +174,9 @@ class LlamaExtractClient:
147
174
  _response = self._client_wrapper.httpx_client.request(
148
175
  "GET",
149
176
  urllib.parse.urljoin(
150
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents/by_name"
177
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
151
178
  ),
152
- params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
179
+ params=remove_none_from_dict({"project_id": project_id}),
153
180
  headers=self._client_wrapper.get_headers(),
154
181
  timeout=60,
155
182
  )
@@ -163,20 +190,10 @@ class LlamaExtractClient:
163
190
  raise ApiError(status_code=_response.status_code, body=_response.text)
164
191
  raise ApiError(status_code=_response.status_code, body=_response_json)
165
192
 
166
- def get_extraction_agent(
167
- self,
168
- extraction_agent_id: str,
169
- *,
170
- project_id: typing.Optional[str] = None,
171
- organization_id: typing.Optional[str] = None,
172
- ) -> ExtractAgent:
193
+ def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
173
194
  """
174
195
  Parameters:
175
196
  - extraction_agent_id: str.
176
-
177
- - project_id: typing.Optional[str].
178
-
179
- - organization_id: typing.Optional[str].
180
197
  ---
181
198
  from llama_cloud.client import LlamaCloud
182
199
 
@@ -191,9 +208,8 @@ class LlamaExtractClient:
191
208
  "GET",
192
209
  urllib.parse.urljoin(
193
210
  f"{self._client_wrapper.get_base_url()}/",
194
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
211
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
195
212
  ),
196
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
197
213
  headers=self._client_wrapper.get_headers(),
198
214
  timeout=60,
199
215
  )
@@ -208,23 +224,13 @@ class LlamaExtractClient:
208
224
  raise ApiError(status_code=_response.status_code, body=_response_json)
209
225
 
210
226
  def update_extraction_agent(
211
- self,
212
- extraction_agent_id: str,
213
- *,
214
- project_id: typing.Optional[str] = None,
215
- organization_id: typing.Optional[str] = None,
216
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
217
- config: ExtractConfig,
227
+ self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
218
228
  ) -> ExtractAgent:
219
229
  """
220
230
  Parameters:
221
231
  - extraction_agent_id: str.
222
232
 
223
- - project_id: typing.Optional[str].
224
-
225
- - organization_id: typing.Optional[str].
226
-
227
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
233
+ - data_schema: ExtractAgentUpdateDataSchema. The schema of the data
228
234
 
229
235
  - config: ExtractConfig. The configuration parameters for the extraction agent.
230
236
  ---
@@ -236,7 +242,6 @@ class LlamaExtractClient:
236
242
  )
237
243
  client.llama_extract.update_extraction_agent(
238
244
  extraction_agent_id="string",
239
- data_schema={},
240
245
  config=ExtractConfig(
241
246
  extraction_mode=ExtractMode.PER_DOC,
242
247
  ),
@@ -246,9 +251,8 @@ class LlamaExtractClient:
246
251
  "PUT",
247
252
  urllib.parse.urljoin(
248
253
  f"{self._client_wrapper.get_base_url()}/",
249
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
254
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
250
255
  ),
251
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
252
256
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
253
257
  headers=self._client_wrapper.get_headers(),
254
258
  timeout=60,
@@ -263,20 +267,10 @@ class LlamaExtractClient:
263
267
  raise ApiError(status_code=_response.status_code, body=_response.text)
264
268
  raise ApiError(status_code=_response.status_code, body=_response_json)
265
269
 
266
- def delete_extraction_agent(
267
- self,
268
- extraction_agent_id: str,
269
- *,
270
- project_id: typing.Optional[str] = None,
271
- organization_id: typing.Optional[str] = None,
272
- ) -> typing.Any:
270
+ def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
273
271
  """
274
272
  Parameters:
275
273
  - extraction_agent_id: str.
276
-
277
- - project_id: typing.Optional[str].
278
-
279
- - organization_id: typing.Optional[str].
280
274
  ---
281
275
  from llama_cloud.client import LlamaCloud
282
276
 
@@ -291,9 +285,8 @@ class LlamaExtractClient:
291
285
  "DELETE",
292
286
  urllib.parse.urljoin(
293
287
  f"{self._client_wrapper.get_base_url()}/",
294
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
288
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
295
289
  ),
296
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
297
290
  headers=self._client_wrapper.get_headers(),
298
291
  timeout=60,
299
292
  )
@@ -307,20 +300,10 @@ class LlamaExtractClient:
307
300
  raise ApiError(status_code=_response.status_code, body=_response.text)
308
301
  raise ApiError(status_code=_response.status_code, body=_response_json)
309
302
 
310
- def list_jobs(
311
- self,
312
- *,
313
- extraction_agent_id: str,
314
- project_id: typing.Optional[str] = None,
315
- organization_id: typing.Optional[str] = None,
316
- ) -> typing.List[ExtractJob]:
303
+ def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
317
304
  """
318
305
  Parameters:
319
306
  - extraction_agent_id: str.
320
-
321
- - project_id: typing.Optional[str].
322
-
323
- - organization_id: typing.Optional[str].
324
307
  ---
325
308
  from llama_cloud.client import LlamaCloud
326
309
 
@@ -334,13 +317,7 @@ class LlamaExtractClient:
334
317
  _response = self._client_wrapper.httpx_client.request(
335
318
  "GET",
336
319
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
337
- params=remove_none_from_dict(
338
- {
339
- "extraction_agent_id": extraction_agent_id,
340
- "project_id": project_id,
341
- "organization_id": organization_id,
342
- }
343
- ),
320
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
344
321
  headers=self._client_wrapper.get_headers(),
345
322
  timeout=60,
346
323
  )
@@ -354,19 +331,9 @@ class LlamaExtractClient:
354
331
  raise ApiError(status_code=_response.status_code, body=_response.text)
355
332
  raise ApiError(status_code=_response.status_code, body=_response_json)
356
333
 
357
- def run_job(
358
- self,
359
- *,
360
- project_id: typing.Optional[str] = None,
361
- organization_id: typing.Optional[str] = None,
362
- request: ExtractJobCreate,
363
- ) -> ExtractJob:
334
+ def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
364
335
  """
365
336
  Parameters:
366
- - project_id: typing.Optional[str].
367
-
368
- - organization_id: typing.Optional[str].
369
-
370
337
  - request: ExtractJobCreate.
371
338
  ---
372
339
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -388,7 +355,6 @@ class LlamaExtractClient:
388
355
  _response = self._client_wrapper.httpx_client.request(
389
356
  "POST",
390
357
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
391
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
392
358
  json=jsonable_encoder(request),
393
359
  headers=self._client_wrapper.get_headers(),
394
360
  timeout=60,
@@ -403,16 +369,10 @@ class LlamaExtractClient:
403
369
  raise ApiError(status_code=_response.status_code, body=_response.text)
404
370
  raise ApiError(status_code=_response.status_code, body=_response_json)
405
371
 
406
- def get_job(
407
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
408
- ) -> ExtractJob:
372
+ def get_job(self, job_id: str) -> ExtractJob:
409
373
  """
410
374
  Parameters:
411
375
  - job_id: str.
412
-
413
- - project_id: typing.Optional[str].
414
-
415
- - organization_id: typing.Optional[str].
416
376
  ---
417
377
  from llama_cloud.client import LlamaCloud
418
378
 
@@ -426,7 +386,6 @@ class LlamaExtractClient:
426
386
  _response = self._client_wrapper.httpx_client.request(
427
387
  "GET",
428
388
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
429
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
430
389
  headers=self._client_wrapper.get_headers(),
431
390
  timeout=60,
432
391
  )
@@ -441,19 +400,10 @@ class LlamaExtractClient:
441
400
  raise ApiError(status_code=_response.status_code, body=_response_json)
442
401
 
443
402
  def run_job_with_parsed_file_test(
444
- self,
445
- *,
446
- project_id: typing.Optional[str] = None,
447
- organization_id: typing.Optional[str] = None,
448
- job_create: ExtractJobCreate,
449
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
450
- ) -> typing.Optional[ExtractResultset]:
403
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
404
+ ) -> ExtractResultset:
451
405
  """
452
406
  Parameters:
453
- - project_id: typing.Optional[str].
454
-
455
- - organization_id: typing.Optional[str].
456
-
457
407
  - job_create: ExtractJobCreate.
458
408
 
459
409
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -463,6 +413,7 @@ class LlamaExtractClient:
463
413
  ExtractJobCreate,
464
414
  ExtractMode,
465
415
  LlamaExtractSettings,
416
+ LlamaParseParameters,
466
417
  )
467
418
  from llama_cloud.client import LlamaCloud
468
419
 
@@ -477,7 +428,9 @@ class LlamaExtractClient:
477
428
  extraction_mode=ExtractMode.PER_DOC,
478
429
  ),
479
430
  ),
480
- extract_settings=LlamaExtractSettings(),
431
+ extract_settings=LlamaExtractSettings(
432
+ llama_parse_params=LlamaParseParameters(),
433
+ ),
481
434
  )
482
435
  """
483
436
  _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -486,13 +439,12 @@ class LlamaExtractClient:
486
439
  _response = self._client_wrapper.httpx_client.request(
487
440
  "POST",
488
441
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
489
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
490
442
  json=jsonable_encoder(_request),
491
443
  headers=self._client_wrapper.get_headers(),
492
444
  timeout=60,
493
445
  )
494
446
  if 200 <= _response.status_code < 300:
495
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
447
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
496
448
  if _response.status_code == 422:
497
449
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
498
450
  try:
@@ -501,19 +453,9 @@ class LlamaExtractClient:
501
453
  raise ApiError(status_code=_response.status_code, body=_response.text)
502
454
  raise ApiError(status_code=_response.status_code, body=_response_json)
503
455
 
504
- def run_job_with_parsed_file(
505
- self,
506
- *,
507
- project_id: typing.Optional[str] = None,
508
- organization_id: typing.Optional[str] = None,
509
- request: ExtractJobCreate,
510
- ) -> typing.Optional[ExtractResultset]:
456
+ def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
511
457
  """
512
458
  Parameters:
513
- - project_id: typing.Optional[str].
514
-
515
- - organization_id: typing.Optional[str].
516
-
517
459
  - request: ExtractJobCreate.
518
460
  ---
519
461
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -535,13 +477,12 @@ class LlamaExtractClient:
535
477
  _response = self._client_wrapper.httpx_client.request(
536
478
  "POST",
537
479
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
538
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
539
480
  json=jsonable_encoder(request),
540
481
  headers=self._client_wrapper.get_headers(),
541
482
  timeout=60,
542
483
  )
543
484
  if 200 <= _response.status_code < 300:
544
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
485
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
545
486
  if _response.status_code == 422:
546
487
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
547
488
  try:
@@ -551,19 +492,10 @@ class LlamaExtractClient:
551
492
  raise ApiError(status_code=_response.status_code, body=_response_json)
552
493
 
553
494
  def run_job_test_user(
554
- self,
555
- *,
556
- project_id: typing.Optional[str] = None,
557
- organization_id: typing.Optional[str] = None,
558
- job_create: ExtractJobCreate,
559
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
495
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
560
496
  ) -> ExtractJob:
561
497
  """
562
498
  Parameters:
563
- - project_id: typing.Optional[str].
564
-
565
- - organization_id: typing.Optional[str].
566
-
567
499
  - job_create: ExtractJobCreate.
568
500
 
569
501
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -573,6 +505,7 @@ class LlamaExtractClient:
573
505
  ExtractJobCreate,
574
506
  ExtractMode,
575
507
  LlamaExtractSettings,
508
+ LlamaParseParameters,
576
509
  )
577
510
  from llama_cloud.client import LlamaCloud
578
511
 
@@ -587,7 +520,9 @@ class LlamaExtractClient:
587
520
  extraction_mode=ExtractMode.PER_DOC,
588
521
  ),
589
522
  ),
590
- extract_settings=LlamaExtractSettings(),
523
+ extract_settings=LlamaExtractSettings(
524
+ llama_parse_params=LlamaParseParameters(),
525
+ ),
591
526
  )
592
527
  """
593
528
  _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -596,7 +531,6 @@ class LlamaExtractClient:
596
531
  _response = self._client_wrapper.httpx_client.request(
597
532
  "POST",
598
533
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
599
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
600
534
  json=jsonable_encoder(_request),
601
535
  headers=self._client_wrapper.get_headers(),
602
536
  timeout=60,
@@ -611,44 +545,61 @@ class LlamaExtractClient:
611
545
  raise ApiError(status_code=_response.status_code, body=_response.text)
612
546
  raise ApiError(status_code=_response.status_code, body=_response_json)
613
547
 
614
- def run_jobs_in_batch(
615
- self,
616
- *,
617
- project_id: typing.Optional[str] = None,
618
- organization_id: typing.Optional[str] = None,
619
- extraction_agent_id: str,
620
- file_ids: typing.List[str],
621
- ) -> typing.List[ExtractJob]:
548
+ def get_job_result(self, job_id: str) -> ExtractResultset:
622
549
  """
623
550
  Parameters:
624
- - project_id: typing.Optional[str].
625
-
626
- - organization_id: typing.Optional[str].
551
+ - job_id: str.
552
+ ---
553
+ from llama_cloud.client import LlamaCloud
627
554
 
628
- - extraction_agent_id: str. The id of the extraction agent
555
+ client = LlamaCloud(
556
+ token="YOUR_TOKEN",
557
+ )
558
+ client.llama_extract.get_job_result(
559
+ job_id="string",
560
+ )
561
+ """
562
+ _response = self._client_wrapper.httpx_client.request(
563
+ "GET",
564
+ urllib.parse.urljoin(
565
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
566
+ ),
567
+ headers=self._client_wrapper.get_headers(),
568
+ timeout=60,
569
+ )
570
+ if 200 <= _response.status_code < 300:
571
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
572
+ if _response.status_code == 422:
573
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
574
+ try:
575
+ _response_json = _response.json()
576
+ except JSONDecodeError:
577
+ raise ApiError(status_code=_response.status_code, body=_response.text)
578
+ raise ApiError(status_code=_response.status_code, body=_response_json)
629
579
 
630
- - file_ids: typing.List[str]. The ids of the files
580
+ def list_extract_runs(self, *, extraction_agent_id: str) -> typing.List[ExtractRun]:
581
+ """
582
+ Parameters:
583
+ - extraction_agent_id: str.
631
584
  ---
632
585
  from llama_cloud.client import LlamaCloud
633
586
 
634
587
  client = LlamaCloud(
635
588
  token="YOUR_TOKEN",
636
589
  )
637
- client.llama_extract.run_jobs_in_batch(
590
+ client.llama_extract.list_extract_runs(
638
591
  extraction_agent_id="string",
639
- file_ids=[],
640
592
  )
641
593
  """
642
594
  _response = self._client_wrapper.httpx_client.request(
643
- "POST",
644
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
645
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
646
- json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
595
+ "GET",
596
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
597
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
647
598
  headers=self._client_wrapper.get_headers(),
648
599
  timeout=60,
649
600
  )
650
601
  if 200 <= _response.status_code < 300:
651
- return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json()) # type: ignore
602
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
652
603
  if _response.status_code == 422:
653
604
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
654
605
  try:
@@ -657,37 +608,30 @@ class LlamaExtractClient:
657
608
  raise ApiError(status_code=_response.status_code, body=_response.text)
658
609
  raise ApiError(status_code=_response.status_code, body=_response_json)
659
610
 
660
- def get_job_result(
661
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
662
- ) -> ExtractResultset:
611
+ def get_run_by_job_id(self, job_id: str) -> ExtractRun:
663
612
  """
664
613
  Parameters:
665
614
  - job_id: str.
666
-
667
- - project_id: typing.Optional[str].
668
-
669
- - organization_id: typing.Optional[str].
670
615
  ---
671
616
  from llama_cloud.client import LlamaCloud
672
617
 
673
618
  client = LlamaCloud(
674
619
  token="YOUR_TOKEN",
675
620
  )
676
- client.llama_extract.get_job_result(
621
+ client.llama_extract.get_run_by_job_id(
677
622
  job_id="string",
678
623
  )
679
624
  """
680
625
  _response = self._client_wrapper.httpx_client.request(
681
626
  "GET",
682
627
  urllib.parse.urljoin(
683
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
628
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
684
629
  ),
685
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
686
630
  headers=self._client_wrapper.get_headers(),
687
631
  timeout=60,
688
632
  )
689
633
  if 200 <= _response.status_code < 300:
690
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
634
+ return pydantic.parse_obj_as(ExtractRun, _response.json()) # type: ignore
691
635
  if _response.status_code == 422:
692
636
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
693
637
  try:
@@ -696,30 +640,23 @@ class LlamaExtractClient:
696
640
  raise ApiError(status_code=_response.status_code, body=_response.text)
697
641
  raise ApiError(status_code=_response.status_code, body=_response_json)
698
642
 
699
- def get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
700
- self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
701
- ) -> ExtractRun:
643
+ def get_run(self, run_id: str) -> ExtractRun:
702
644
  """
703
645
  Parameters:
704
646
  - run_id: str.
705
-
706
- - project_id: typing.Optional[str].
707
-
708
- - organization_id: typing.Optional[str].
709
647
  ---
710
648
  from llama_cloud.client import LlamaCloud
711
649
 
712
650
  client = LlamaCloud(
713
651
  token="YOUR_TOKEN",
714
652
  )
715
- client.llama_extract.get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
653
+ client.llama_extract.get_run(
716
654
  run_id="string",
717
655
  )
718
656
  """
719
657
  _response = self._client_wrapper.httpx_client.request(
720
658
  "GET",
721
659
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
722
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
723
660
  headers=self._client_wrapper.get_headers(),
724
661
  timeout=60,
725
662
  )
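
The async client below mirrors the sync changes one for one. A minimal sketch, assuming the method names shown in the following hunks and that ExtractAgent exposes id and name attributes:

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Same renamed surface as the sync client above, awaited.
        agents = await client.llama_extract.list_extraction_agents()
        for agent in agents:
            runs = await client.llama_extract.list_extract_runs(extraction_agent_id=agent.id)
            print(agent.name, len(runs))

    asyncio.run(main())
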
@@ -738,13 +675,42 @@ class AsyncLlamaExtractClient:
738
675
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
739
676
  self._client_wrapper = client_wrapper
740
677
 
678
+ async def list_extraction_agents(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractAgent]:
679
+ """
680
+ Parameters:
681
+ - project_id: typing.Optional[str].
682
+ ---
683
+ from llama_cloud.client import AsyncLlamaCloud
684
+
685
+ client = AsyncLlamaCloud(
686
+ token="YOUR_TOKEN",
687
+ )
688
+ await client.llama_extract.list_extraction_agents()
689
+ """
690
+ _response = await self._client_wrapper.httpx_client.request(
691
+ "GET",
692
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
693
+ params=remove_none_from_dict({"project_id": project_id}),
694
+ headers=self._client_wrapper.get_headers(),
695
+ timeout=60,
696
+ )
697
+ if 200 <= _response.status_code < 300:
698
+ return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
699
+ if _response.status_code == 422:
700
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
701
+ try:
702
+ _response_json = _response.json()
703
+ except JSONDecodeError:
704
+ raise ApiError(status_code=_response.status_code, body=_response.text)
705
+ raise ApiError(status_code=_response.status_code, body=_response_json)
706
+
741
707
  async def create_extraction_agent(
742
708
  self,
743
709
  *,
744
710
  project_id: typing.Optional[str] = None,
745
711
  organization_id: typing.Optional[str] = None,
746
712
  name: str,
747
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]],
713
+ data_schema: ExtractAgentCreateDataSchema,
748
714
  config: ExtractConfig,
749
715
  ) -> ExtractAgent:
750
716
  """
@@ -755,7 +721,7 @@ class AsyncLlamaExtractClient:
755
721
 
756
722
  - name: str. The name of the extraction schema
757
723
 
758
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaValue]]. The schema of the data.
724
+ - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
759
725
 
760
726
  - config: ExtractConfig. The configuration parameters for the extraction agent.
761
727
  ---
@@ -767,7 +733,6 @@ class AsyncLlamaExtractClient:
767
733
  )
768
734
  await client.llama_extract.create_extraction_agent(
769
735
  name="string",
770
- data_schema={},
771
736
  config=ExtractConfig(
772
737
  extraction_mode=ExtractMode.PER_DOC,
773
738
  ),
@@ -775,7 +740,7 @@ class AsyncLlamaExtractClient:
775
740
  """
776
741
  _response = await self._client_wrapper.httpx_client.request(
777
742
  "POST",
778
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agent"),
743
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents"),
779
744
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
780
745
  json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
781
746
  headers=self._client_wrapper.get_headers(),
@@ -791,33 +756,34 @@ class AsyncLlamaExtractClient:
791
756
  raise ApiError(status_code=_response.status_code, body=_response.text)
792
757
  raise ApiError(status_code=_response.status_code, body=_response_json)
793
758
 
794
- async def list_extraction_agents(
795
- self, *, project_id: str, organization_id: typing.Optional[str] = None
796
- ) -> typing.List[ExtractAgent]:
759
+ async def validate_extraction_schema(
760
+ self, *, data_schema: ExtractSchemaValidateRequestDataSchema
761
+ ) -> ExtractSchemaValidateResponse:
797
762
  """
798
- Parameters:
799
- - project_id: str.
763
+ Validates an extraction agent's schema definition.
764
+ Returns the normalized and validated schema if valid, otherwise raises an HTTP 400.
800
765
 
801
- - organization_id: typing.Optional[str].
766
+ Parameters:
767
+ - data_schema: ExtractSchemaValidateRequestDataSchema.
802
768
  ---
803
769
  from llama_cloud.client import AsyncLlamaCloud
804
770
 
805
771
  client = AsyncLlamaCloud(
806
772
  token="YOUR_TOKEN",
807
773
  )
808
- await client.llama_extract.list_extraction_agents(
809
- project_id="string",
810
- )
774
+ await client.llama_extract.validate_extraction_schema()
811
775
  """
812
776
  _response = await self._client_wrapper.httpx_client.request(
813
- "GET",
814
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents"),
815
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
777
+ "POST",
778
+ urllib.parse.urljoin(
779
+ f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction-agents/schema/validation"
780
+ ),
781
+ json=jsonable_encoder({"data_schema": data_schema}),
816
782
  headers=self._client_wrapper.get_headers(),
817
783
  timeout=60,
818
784
  )
819
785
  if 200 <= _response.status_code < 300:
820
- return pydantic.parse_obj_as(typing.List[ExtractAgent], _response.json()) # type: ignore
786
+ return pydantic.parse_obj_as(ExtractSchemaValidateResponse, _response.json()) # type: ignore
821
787
  if _response.status_code == 422:
822
788
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
823
789
  try:
@@ -826,16 +792,12 @@ class AsyncLlamaExtractClient:
826
792
  raise ApiError(status_code=_response.status_code, body=_response.text)
827
793
  raise ApiError(status_code=_response.status_code, body=_response_json)
828
794
 
829
- async def get_extraction_agent_by_name(
830
- self, *, name: str, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
831
- ) -> ExtractAgent:
795
+ async def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
832
796
  """
833
797
  Parameters:
834
798
  - name: str.
835
799
 
836
800
  - project_id: typing.Optional[str].
837
-
838
- - organization_id: typing.Optional[str].
839
801
  ---
840
802
  from llama_cloud.client import AsyncLlamaCloud
841
803
 
@@ -849,9 +811,9 @@ class AsyncLlamaExtractClient:
849
811
  _response = await self._client_wrapper.httpx_client.request(
850
812
  "GET",
851
813
  urllib.parse.urljoin(
852
- f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/extraction_agents/by_name"
814
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/extraction-agents/by-name/{name}"
853
815
  ),
854
- params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
816
+ params=remove_none_from_dict({"project_id": project_id}),
855
817
  headers=self._client_wrapper.get_headers(),
856
818
  timeout=60,
857
819
  )
@@ -865,20 +827,10 @@ class AsyncLlamaExtractClient:
865
827
  raise ApiError(status_code=_response.status_code, body=_response.text)
866
828
  raise ApiError(status_code=_response.status_code, body=_response_json)
867
829
 
868
- async def get_extraction_agent(
869
- self,
870
- extraction_agent_id: str,
871
- *,
872
- project_id: typing.Optional[str] = None,
873
- organization_id: typing.Optional[str] = None,
874
- ) -> ExtractAgent:
830
+ async def get_extraction_agent(self, extraction_agent_id: str) -> ExtractAgent:
875
831
  """
876
832
  Parameters:
877
833
  - extraction_agent_id: str.
878
-
879
- - project_id: typing.Optional[str].
880
-
881
- - organization_id: typing.Optional[str].
882
834
  ---
883
835
  from llama_cloud.client import AsyncLlamaCloud
884
836
 
@@ -893,9 +845,8 @@ class AsyncLlamaExtractClient:
893
845
  "GET",
894
846
  urllib.parse.urljoin(
895
847
  f"{self._client_wrapper.get_base_url()}/",
896
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
848
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
897
849
  ),
898
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
899
850
  headers=self._client_wrapper.get_headers(),
900
851
  timeout=60,
901
852
  )
@@ -910,23 +861,13 @@ class AsyncLlamaExtractClient:
910
861
  raise ApiError(status_code=_response.status_code, body=_response_json)
911
862
 
912
863
  async def update_extraction_agent(
913
- self,
914
- extraction_agent_id: str,
915
- *,
916
- project_id: typing.Optional[str] = None,
917
- organization_id: typing.Optional[str] = None,
918
- data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]],
919
- config: ExtractConfig,
864
+ self, extraction_agent_id: str, *, data_schema: ExtractAgentUpdateDataSchema, config: ExtractConfig
920
865
  ) -> ExtractAgent:
921
866
  """
922
867
  Parameters:
923
868
  - extraction_agent_id: str.
924
869
 
925
- - project_id: typing.Optional[str].
926
-
927
- - organization_id: typing.Optional[str].
928
-
929
- - data_schema: typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaValue]]. The schema of the data
870
+ - data_schema: ExtractAgentUpdateDataSchema. The schema of the data
930
871
 
931
872
  - config: ExtractConfig. The configuration parameters for the extraction agent.
932
873
  ---
@@ -938,7 +879,6 @@ class AsyncLlamaExtractClient:
938
879
  )
939
880
  await client.llama_extract.update_extraction_agent(
940
881
  extraction_agent_id="string",
941
- data_schema={},
942
882
  config=ExtractConfig(
943
883
  extraction_mode=ExtractMode.PER_DOC,
944
884
  ),
@@ -948,9 +888,8 @@ class AsyncLlamaExtractClient:
948
888
  "PUT",
949
889
  urllib.parse.urljoin(
950
890
  f"{self._client_wrapper.get_base_url()}/",
951
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
891
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
952
892
  ),
953
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
954
893
  json=jsonable_encoder({"data_schema": data_schema, "config": config}),
955
894
  headers=self._client_wrapper.get_headers(),
956
895
  timeout=60,
@@ -965,20 +904,10 @@ class AsyncLlamaExtractClient:
965
904
  raise ApiError(status_code=_response.status_code, body=_response.text)
966
905
  raise ApiError(status_code=_response.status_code, body=_response_json)
967
906
 
968
- async def delete_extraction_agent(
969
- self,
970
- extraction_agent_id: str,
971
- *,
972
- project_id: typing.Optional[str] = None,
973
- organization_id: typing.Optional[str] = None,
974
- ) -> typing.Any:
907
+ async def delete_extraction_agent(self, extraction_agent_id: str) -> typing.Any:
975
908
  """
976
909
  Parameters:
977
910
  - extraction_agent_id: str.
978
-
979
- - project_id: typing.Optional[str].
980
-
981
- - organization_id: typing.Optional[str].
982
911
  ---
983
912
  from llama_cloud.client import AsyncLlamaCloud
984
913
 
@@ -993,9 +922,8 @@ class AsyncLlamaExtractClient:
993
922
  "DELETE",
994
923
  urllib.parse.urljoin(
995
924
  f"{self._client_wrapper.get_base_url()}/",
996
- f"api/v1/extractionv2/extraction_agents/{extraction_agent_id}",
925
+ f"api/v1/extractionv2/extraction-agents/{extraction_agent_id}",
997
926
  ),
998
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
999
927
  headers=self._client_wrapper.get_headers(),
1000
928
  timeout=60,
1001
929
  )
@@ -1009,20 +937,10 @@ class AsyncLlamaExtractClient:
1009
937
  raise ApiError(status_code=_response.status_code, body=_response.text)
1010
938
  raise ApiError(status_code=_response.status_code, body=_response_json)
1011
939
 
1012
- async def list_jobs(
1013
- self,
1014
- *,
1015
- extraction_agent_id: str,
1016
- project_id: typing.Optional[str] = None,
1017
- organization_id: typing.Optional[str] = None,
1018
- ) -> typing.List[ExtractJob]:
940
+ async def list_jobs(self, *, extraction_agent_id: str) -> typing.List[ExtractJob]:
1019
941
  """
1020
942
  Parameters:
1021
943
  - extraction_agent_id: str.
1022
-
1023
- - project_id: typing.Optional[str].
1024
-
1025
- - organization_id: typing.Optional[str].
1026
944
  ---
1027
945
  from llama_cloud.client import AsyncLlamaCloud
1028
946
 
@@ -1036,13 +954,7 @@ class AsyncLlamaExtractClient:
1036
954
  _response = await self._client_wrapper.httpx_client.request(
1037
955
  "GET",
1038
956
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
1039
- params=remove_none_from_dict(
1040
- {
1041
- "extraction_agent_id": extraction_agent_id,
1042
- "project_id": project_id,
1043
- "organization_id": organization_id,
1044
- }
1045
- ),
957
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
1046
958
  headers=self._client_wrapper.get_headers(),
1047
959
  timeout=60,
1048
960
  )
@@ -1056,19 +968,9 @@ class AsyncLlamaExtractClient:
1056
968
  raise ApiError(status_code=_response.status_code, body=_response.text)
1057
969
  raise ApiError(status_code=_response.status_code, body=_response_json)
1058
970
 
1059
- async def run_job(
1060
- self,
1061
- *,
1062
- project_id: typing.Optional[str] = None,
1063
- organization_id: typing.Optional[str] = None,
1064
- request: ExtractJobCreate,
1065
- ) -> ExtractJob:
971
+ async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
1066
972
  """
1067
973
  Parameters:
1068
- - project_id: typing.Optional[str].
1069
-
1070
- - organization_id: typing.Optional[str].
1071
-
1072
974
  - request: ExtractJobCreate.
1073
975
  ---
1074
976
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -1090,7 +992,6 @@ class AsyncLlamaExtractClient:
1090
992
  _response = await self._client_wrapper.httpx_client.request(
1091
993
  "POST",
1092
994
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs"),
1093
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1094
995
  json=jsonable_encoder(request),
1095
996
  headers=self._client_wrapper.get_headers(),
1096
997
  timeout=60,
@@ -1105,16 +1006,10 @@ class AsyncLlamaExtractClient:
1105
1006
  raise ApiError(status_code=_response.status_code, body=_response.text)
1106
1007
  raise ApiError(status_code=_response.status_code, body=_response_json)
1107
1008
 
1108
- async def get_job(
1109
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1110
- ) -> ExtractJob:
1009
+ async def get_job(self, job_id: str) -> ExtractJob:
1111
1010
  """
1112
1011
  Parameters:
1113
1012
  - job_id: str.
1114
-
1115
- - project_id: typing.Optional[str].
1116
-
1117
- - organization_id: typing.Optional[str].
1118
1013
  ---
1119
1014
  from llama_cloud.client import AsyncLlamaCloud
1120
1015
 
@@ -1128,7 +1023,6 @@ class AsyncLlamaExtractClient:
1128
1023
  _response = await self._client_wrapper.httpx_client.request(
1129
1024
  "GET",
1130
1025
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}"),
1131
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1132
1026
  headers=self._client_wrapper.get_headers(),
1133
1027
  timeout=60,
1134
1028
  )
@@ -1143,19 +1037,10 @@ class AsyncLlamaExtractClient:
1143
1037
  raise ApiError(status_code=_response.status_code, body=_response_json)
1144
1038
 
1145
1039
  async def run_job_with_parsed_file_test(
1146
- self,
1147
- *,
1148
- project_id: typing.Optional[str] = None,
1149
- organization_id: typing.Optional[str] = None,
1150
- job_create: ExtractJobCreate,
1151
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
1152
- ) -> typing.Optional[ExtractResultset]:
1040
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
1041
+ ) -> ExtractResultset:
1153
1042
  """
1154
1043
  Parameters:
1155
- - project_id: typing.Optional[str].
1156
-
1157
- - organization_id: typing.Optional[str].
1158
-
1159
1044
  - job_create: ExtractJobCreate.
1160
1045
 
1161
1046
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1165,6 +1050,7 @@ class AsyncLlamaExtractClient:
1165
1050
  ExtractJobCreate,
1166
1051
  ExtractMode,
1167
1052
  LlamaExtractSettings,
1053
+ LlamaParseParameters,
1168
1054
  )
1169
1055
  from llama_cloud.client import AsyncLlamaCloud
1170
1056
 
@@ -1179,7 +1065,9 @@ class AsyncLlamaExtractClient:
1179
1065
  extraction_mode=ExtractMode.PER_DOC,
1180
1066
  ),
1181
1067
  ),
1182
- extract_settings=LlamaExtractSettings(),
1068
+ extract_settings=LlamaExtractSettings(
1069
+ llama_parse_params=LlamaParseParameters(),
1070
+ ),
1183
1071
  )
1184
1072
  """
1185
1073
  _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -1188,13 +1076,12 @@ class AsyncLlamaExtractClient:
1188
1076
  _response = await self._client_wrapper.httpx_client.request(
1189
1077
  "POST",
1190
1078
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed/test"),
1191
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1192
1079
  json=jsonable_encoder(_request),
1193
1080
  headers=self._client_wrapper.get_headers(),
1194
1081
  timeout=60,
1195
1082
  )
1196
1083
  if 200 <= _response.status_code < 300:
1197
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
1084
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
1198
1085
  if _response.status_code == 422:
1199
1086
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1200
1087
  try:
@@ -1203,19 +1090,9 @@ class AsyncLlamaExtractClient:
1203
1090
  raise ApiError(status_code=_response.status_code, body=_response.text)
1204
1091
  raise ApiError(status_code=_response.status_code, body=_response_json)
1205
1092
 
1206
- async def run_job_with_parsed_file(
1207
- self,
1208
- *,
1209
- project_id: typing.Optional[str] = None,
1210
- organization_id: typing.Optional[str] = None,
1211
- request: ExtractJobCreate,
1212
- ) -> typing.Optional[ExtractResultset]:
1093
+ async def run_job_with_parsed_file(self, *, request: ExtractJobCreate) -> ExtractResultset:
1213
1094
  """
1214
1095
  Parameters:
1215
- - project_id: typing.Optional[str].
1216
-
1217
- - organization_id: typing.Optional[str].
1218
-
1219
1096
  - request: ExtractJobCreate.
1220
1097
  ---
1221
1098
  from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
@@ -1237,13 +1114,12 @@ class AsyncLlamaExtractClient:
1237
1114
  _response = await self._client_wrapper.httpx_client.request(
1238
1115
  "POST",
1239
1116
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/parsed"),
1240
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1241
1117
  json=jsonable_encoder(request),
1242
1118
  headers=self._client_wrapper.get_headers(),
1243
1119
  timeout=60,
1244
1120
  )
1245
1121
  if 200 <= _response.status_code < 300:
1246
- return pydantic.parse_obj_as(typing.Optional[ExtractResultset], _response.json()) # type: ignore
1122
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
1247
1123
  if _response.status_code == 422:
1248
1124
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1249
1125
  try:
@@ -1253,19 +1129,10 @@ class AsyncLlamaExtractClient:
1253
1129
  raise ApiError(status_code=_response.status_code, body=_response_json)
1254
1130
 
1255
1131
  async def run_job_test_user(
1256
- self,
1257
- *,
1258
- project_id: typing.Optional[str] = None,
1259
- organization_id: typing.Optional[str] = None,
1260
- job_create: ExtractJobCreate,
1261
- extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
1132
+ self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
1262
1133
  ) -> ExtractJob:
1263
1134
  """
1264
1135
  Parameters:
1265
- - project_id: typing.Optional[str].
1266
-
1267
- - organization_id: typing.Optional[str].
1268
-
1269
1136
  - job_create: ExtractJobCreate.
1270
1137
 
1271
1138
  - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1275,6 +1142,7 @@ class AsyncLlamaExtractClient:
1275
1142
  ExtractJobCreate,
1276
1143
  ExtractMode,
1277
1144
  LlamaExtractSettings,
1145
+ LlamaParseParameters,
1278
1146
  )
1279
1147
  from llama_cloud.client import AsyncLlamaCloud
1280
1148
 
@@ -1289,7 +1157,9 @@ class AsyncLlamaExtractClient:
1289
1157
  extraction_mode=ExtractMode.PER_DOC,
1290
1158
  ),
1291
1159
  ),
1292
- extract_settings=LlamaExtractSettings(),
1160
+ extract_settings=LlamaExtractSettings(
1161
+ llama_parse_params=LlamaParseParameters(),
1162
+ ),
1293
1163
  )
1294
1164
  """
1295
1165
  _request: typing.Dict[str, typing.Any] = {"job_create": job_create}
@@ -1298,7 +1168,6 @@ class AsyncLlamaExtractClient:
1298
1168
  _response = await self._client_wrapper.httpx_client.request(
1299
1169
  "POST",
1300
1170
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/test"),
1301
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1302
1171
  json=jsonable_encoder(_request),
1303
1172
  headers=self._client_wrapper.get_headers(),
1304
1173
  timeout=60,
@@ -1313,44 +1182,61 @@ class AsyncLlamaExtractClient:
1313
1182
  raise ApiError(status_code=_response.status_code, body=_response.text)
1314
1183
  raise ApiError(status_code=_response.status_code, body=_response_json)
1315
1184
 
1316
- async def run_jobs_in_batch(
1317
- self,
1318
- *,
1319
- project_id: typing.Optional[str] = None,
1320
- organization_id: typing.Optional[str] = None,
1321
- extraction_agent_id: str,
1322
- file_ids: typing.List[str],
1323
- ) -> typing.List[ExtractJob]:
1185
+ async def get_job_result(self, job_id: str) -> ExtractResultset:
1324
1186
  """
1325
1187
  Parameters:
1326
- - project_id: typing.Optional[str].
1327
-
1328
- - organization_id: typing.Optional[str].
1188
+ - job_id: str.
1189
+ ---
1190
+ from llama_cloud.client import AsyncLlamaCloud
1329
1191
 
1330
- - extraction_agent_id: str. The id of the extraction agent
1192
+ client = AsyncLlamaCloud(
1193
+ token="YOUR_TOKEN",
1194
+ )
1195
+ await client.llama_extract.get_job_result(
1196
+ job_id="string",
1197
+ )
1198
+ """
1199
+ _response = await self._client_wrapper.httpx_client.request(
1200
+ "GET",
1201
+ urllib.parse.urljoin(
1202
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
1203
+ ),
1204
+ headers=self._client_wrapper.get_headers(),
1205
+ timeout=60,
1206
+ )
1207
+ if 200 <= _response.status_code < 300:
1208
+ return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
1209
+ if _response.status_code == 422:
1210
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1211
+ try:
1212
+ _response_json = _response.json()
1213
+ except JSONDecodeError:
1214
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1215
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1331
1216
 
1332
- - file_ids: typing.List[str]. The ids of the files
1217
+ async def list_extract_runs(self, *, extraction_agent_id: str) -> typing.List[ExtractRun]:
1218
+ """
1219
+ Parameters:
1220
+ - extraction_agent_id: str.
1333
1221
  ---
1334
1222
  from llama_cloud.client import AsyncLlamaCloud
1335
1223
 
1336
1224
  client = AsyncLlamaCloud(
1337
1225
  token="YOUR_TOKEN",
1338
1226
  )
1339
- await client.llama_extract.run_jobs_in_batch(
1227
+ await client.llama_extract.list_extract_runs(
1340
1228
  extraction_agent_id="string",
1341
- file_ids=[],
1342
1229
  )
1343
1230
  """
1344
1231
  _response = await self._client_wrapper.httpx_client.request(
1345
- "POST",
1346
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/jobs/batch"),
1347
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1348
- json=jsonable_encoder({"extraction_agent_id": extraction_agent_id, "file_ids": file_ids}),
1232
+ "GET",
1233
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extractionv2/runs"),
1234
+ params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
1349
1235
  headers=self._client_wrapper.get_headers(),
1350
1236
  timeout=60,
1351
1237
  )
1352
1238
  if 200 <= _response.status_code < 300:
1353
- return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json()) # type: ignore
1239
+ return pydantic.parse_obj_as(typing.List[ExtractRun], _response.json()) # type: ignore
1354
1240
  if _response.status_code == 422:
1355
1241
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1356
1242
  try:
@@ -1359,37 +1245,30 @@ class AsyncLlamaExtractClient:
1359
1245
  raise ApiError(status_code=_response.status_code, body=_response.text)
1360
1246
  raise ApiError(status_code=_response.status_code, body=_response_json)
1361
1247
 
1362
- async def get_job_result(
1363
- self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1364
- ) -> ExtractResultset:
1248
+ async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
1365
1249
  """
1366
1250
  Parameters:
1367
1251
  - job_id: str.
1368
-
1369
- - project_id: typing.Optional[str].
1370
-
1371
- - organization_id: typing.Optional[str].
1372
1252
  ---
1373
1253
  from llama_cloud.client import AsyncLlamaCloud
1374
1254
 
1375
1255
  client = AsyncLlamaCloud(
1376
1256
  token="YOUR_TOKEN",
1377
1257
  )
1378
- await client.llama_extract.get_job_result(
1258
+ await client.llama_extract.get_run_by_job_id(
1379
1259
  job_id="string",
1380
1260
  )
1381
1261
  """
1382
1262
  _response = await self._client_wrapper.httpx_client.request(
1383
1263
  "GET",
1384
1264
  urllib.parse.urljoin(
1385
- f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/jobs/{job_id}/result"
1265
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/by-job/{job_id}"
1386
1266
  ),
1387
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1388
1267
  headers=self._client_wrapper.get_headers(),
1389
1268
  timeout=60,
1390
1269
  )
1391
1270
  if 200 <= _response.status_code < 300:
1392
- return pydantic.parse_obj_as(ExtractResultset, _response.json()) # type: ignore
1271
+ return pydantic.parse_obj_as(ExtractRun, _response.json()) # type: ignore
1393
1272
  if _response.status_code == 422:
1394
1273
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
1395
1274
  try:
@@ -1398,30 +1277,23 @@ class AsyncLlamaExtractClient:
1398
1277
  raise ApiError(status_code=_response.status_code, body=_response.text)
1399
1278
  raise ApiError(status_code=_response.status_code, body=_response_json)
1400
1279
 
1401
- async def get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
1402
- self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1403
- ) -> ExtractRun:
1280
+ async def get_run(self, run_id: str) -> ExtractRun:
1404
1281
  """
1405
1282
  Parameters:
1406
1283
  - run_id: str.
1407
-
1408
- - project_id: typing.Optional[str].
1409
-
1410
- - organization_id: typing.Optional[str].
1411
1284
  ---
1412
1285
  from llama_cloud.client import AsyncLlamaCloud
1413
1286
 
1414
1287
  client = AsyncLlamaCloud(
1415
1288
  token="YOUR_TOKEN",
1416
1289
  )
1417
- await client.llama_extract.get_extract_run_api_v_1_extractionv_2_runs_run_id_get(
1290
+ await client.llama_extract.get_run(
1418
1291
  run_id="string",
1419
1292
  )
1420
1293
  """
1421
1294
  _response = await self._client_wrapper.httpx_client.request(
1422
1295
  "GET",
1423
1296
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extractionv2/runs/{run_id}"),
1424
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1425
1297
  headers=self._client_wrapper.get_headers(),
1426
1298
  timeout=60,
1427
1299
  )