llama-cloud 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (47)
  1. llama_cloud/__init__.py +34 -4
  2. llama_cloud/client.py +6 -0
  3. llama_cloud/resources/__init__.py +16 -1
  4. llama_cloud/resources/data_sinks/client.py +40 -8
  5. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +2 -0
  6. llama_cloud/resources/data_sources/client.py +48 -12
  7. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +6 -4
  8. llama_cloud/resources/extraction/__init__.py +5 -0
  9. llama_cloud/resources/extraction/client.py +632 -0
  10. llama_cloud/resources/extraction/types/__init__.py +5 -0
  11. llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +7 -0
  12. llama_cloud/resources/organizations/__init__.py +2 -0
  13. llama_cloud/resources/organizations/client.py +786 -0
  14. llama_cloud/resources/pipelines/client.py +312 -12
  15. llama_cloud/resources/projects/client.py +28 -8
  16. llama_cloud/types/__init__.py +28 -4
  17. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  18. llama_cloud/types/{cloud_google_drive_data_source.py → chat_params.py} +5 -6
  19. llama_cloud/types/cloud_azure_ai_search_vector_store.py +42 -0
  20. llama_cloud/types/cloud_jira_data_source.py +43 -0
  21. llama_cloud/types/{cloud_gcs_data_source.py → cloud_notion_page_data_source.py} +4 -6
  22. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  23. llama_cloud/types/cloud_slack_data_source.py +42 -0
  24. llama_cloud/types/configurable_data_sink_names.py +4 -0
  25. llama_cloud/types/configurable_data_source_names.py +12 -8
  26. llama_cloud/types/data_sink_component_one.py +2 -0
  27. llama_cloud/types/data_sink_create_component_one.py +2 -0
  28. llama_cloud/types/data_source_component_one.py +6 -4
  29. llama_cloud/types/data_source_create_component_one.py +6 -4
  30. llama_cloud/types/eval_dataset_job_record.py +1 -0
  31. llama_cloud/types/extraction_result.py +42 -0
  32. llama_cloud/types/extraction_result_data_value.py +5 -0
  33. llama_cloud/types/extraction_schema.py +43 -0
  34. llama_cloud/types/extraction_schema_data_schema_value.py +7 -0
  35. llama_cloud/types/organization.py +38 -0
  36. llama_cloud/types/organization_create.py +35 -0
  37. llama_cloud/types/pipeline_data_source_component_one.py +6 -4
  38. llama_cloud/types/preset_retrieval_params.py +5 -0
  39. llama_cloud/types/project.py +1 -1
  40. llama_cloud/types/retrieval_mode.py +29 -0
  41. llama_cloud/types/text_node.py +1 -0
  42. llama_cloud/types/user_organization.py +40 -0
  43. llama_cloud/types/user_organization_create.py +36 -0
  44. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/METADATA +1 -1
  45. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/RECORD +47 -29
  46. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/LICENSE +0 -0
  47. {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/WHEEL +0 -0
llama_cloud/resources/extraction/client.py
@@ -0,0 +1,632 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+ import urllib.parse
+ from json.decoder import JSONDecodeError
+
+ from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from ...core.jsonable_encoder import jsonable_encoder
+ from ...core.remove_none_from_dict import remove_none_from_dict
+ from ...errors.unprocessable_entity_error import UnprocessableEntityError
+ from ...types.extraction_result import ExtractionResult
+ from ...types.extraction_schema import ExtractionSchema
+ from ...types.http_validation_error import HttpValidationError
+ from .types.extraction_schema_update_data_schema_value import ExtractionSchemaUpdateDataSchemaValue
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+
+
+ class ExtractionClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     def infer_schema(
+         self, *, name: str, project_id: typing.Optional[str] = OMIT, file_ids: typing.List[str]
+     ) -> ExtractionSchema:
+         """
+         Parameters:
+             - name: str. The name of the extraction schema
+
+             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to
+
+             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.infer_schema(
+             name="string",
+             file_ids=[],
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+         if project_id is not OMIT:
+             _request["project_id"] = project_id
+         _response = self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+         """
+         Parameters:
+             - project_id: typing.Optional[str].
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.list_schemas()
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
+             params=remove_none_from_dict({"project_id": project_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def get_schema(self, schema_id: str) -> ExtractionSchema:
+         """
+         Parameters:
+             - schema_id: str.
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.get_schema(
+             schema_id="string",
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def update_schema(
+         self,
+         schema_id: str,
+         *,
+         data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]] = OMIT,
+     ) -> ExtractionSchema:
+         """
+         Parameters:
+             - schema_id: str.
+
+             - data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]]. The schema of the data
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.update_schema(
+             schema_id="string",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if data_schema is not OMIT:
+             _request["data_schema"] = data_schema
+         _response = self._client_wrapper.httpx_client.request(
+             "PUT",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def list_jobs(self, *, schema_id: typing.Optional[str] = None) -> typing.List[ExtractionResult]:
+         """
+         Parameters:
+             - schema_id: typing.Optional[str].
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.list_jobs()
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+             params=remove_none_from_dict({"schema_id": schema_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def run_job(self, *, schema_id: str, file_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - schema_id: str. The id of the schema
+
+             - file_id: str. The id of the file
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.run_job(
+             schema_id="string",
+             file_id="string",
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+             json=jsonable_encoder({"schema_id": schema_id, "file_id": file_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def get_job(self, job_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - job_id: str.
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.get_job(
+             job_id="string",
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionResult]:
+         """
+         Parameters:
+             - schema_id: str. The id of the schema
+
+             - file_ids: typing.List[str]. The ids of the files
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.run_jobs_in_batch(
+             schema_id="string",
+             file_ids=[],
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+             json=jsonable_encoder({"schema_id": schema_id, "file_ids": file_ids}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def get_job_result(self, job_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - job_id: str.
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.extraction.get_job_result(
+             job_id="string",
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+ class AsyncExtractionClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     async def infer_schema(
+         self, *, name: str, project_id: typing.Optional[str] = OMIT, file_ids: typing.List[str]
+     ) -> ExtractionSchema:
+         """
+         Parameters:
+             - name: str. The name of the extraction schema
+
+             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to
+
+             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.infer_schema(
+             name="string",
+             file_ids=[],
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+         if project_id is not OMIT:
+             _request["project_id"] = project_id
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+         """
+         Parameters:
+             - project_id: typing.Optional[str].
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.list_schemas()
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
+             params=remove_none_from_dict({"project_id": project_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def get_schema(self, schema_id: str) -> ExtractionSchema:
+         """
+         Parameters:
+             - schema_id: str.
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.get_schema(
+             schema_id="string",
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def update_schema(
+         self,
+         schema_id: str,
+         *,
+         data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]] = OMIT,
+     ) -> ExtractionSchema:
+         """
+         Parameters:
+             - schema_id: str.
+
+             - data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]]. The schema of the data
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.update_schema(
+             schema_id="string",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if data_schema is not OMIT:
+             _request["data_schema"] = data_schema
+         _response = await self._client_wrapper.httpx_client.request(
+             "PUT",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionSchema, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def list_jobs(self, *, schema_id: typing.Optional[str] = None) -> typing.List[ExtractionResult]:
+         """
+         Parameters:
+             - schema_id: typing.Optional[str].
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.list_jobs()
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+             params=remove_none_from_dict({"schema_id": schema_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def run_job(self, *, schema_id: str, file_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - schema_id: str. The id of the schema
+
+             - file_id: str. The id of the file
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.run_job(
+             schema_id="string",
+             file_id="string",
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+             json=jsonable_encoder({"schema_id": schema_id, "file_id": file_id}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def get_job(self, job_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - job_id: str.
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.get_job(
+             job_id="string",
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionResult]:
+         """
+         Parameters:
+             - schema_id: str. The id of the schema
+
+             - file_ids: typing.List[str]. The ids of the files
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.run_jobs_in_batch(
+             schema_id="string",
+             file_ids=[],
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+             json=jsonable_encoder({"schema_id": schema_id, "file_ids": file_ids}),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def get_job_result(self, job_id: str) -> ExtractionResult:
+         """
+         Parameters:
+             - job_id: str.
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.extraction.get_job_result(
+             job_id="string",
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "GET",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
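
Taken together, the new extraction resource follows an infer-schema, run-job, fetch-result flow. Below is a minimal sync usage sketch based only on the method signatures and docstring examples in the hunk above; the file IDs are placeholders, and the id/data attributes read off the returned ExtractionSchema and ExtractionResult models are assumptions that do not appear in this diff:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Infer a schema from files that were already uploaded (placeholder IDs).
    schema = client.extraction.infer_schema(
        name="invoices",
        file_ids=["file-id-1", "file-id-2"],
    )

    # NOTE: schema.id, job.id, and result.data are assumed field names on the
    # ExtractionSchema / ExtractionResult models; they are not shown in this hunk.
    job = client.extraction.run_job(schema_id=schema.id, file_id="file-id-1")
    result = client.extraction.get_job_result(job_id=job.id)
    print(result.data)

AsyncExtractionClient mirrors the same methods, so the equivalent async flow is the same calls awaited on an AsyncLlamaCloud instance, as the second half of the hunk shows.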
llama_cloud/resources/extraction/types/__init__.py
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from .extraction_schema_update_data_schema_value import ExtractionSchemaUpdateDataSchemaValue
+
+ __all__ = ["ExtractionSchemaUpdateDataSchemaValue"]
llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractionSchemaUpdateDataSchemaValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
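
Because ExtractionSchemaUpdateDataSchemaValue is a plain typing.Union over JSON-like values, update_schema accepts an ordinary dict for data_schema. A hedged sketch, assuming a JSON-Schema-style payload (the exact schema format the service expects is not shown in this diff) and a placeholder schema ID:

    import typing

    from llama_cloud.client import LlamaCloud
    from llama_cloud.resources.extraction.types import ExtractionSchemaUpdateDataSchemaValue

    client = LlamaCloud(token="YOUR_TOKEN")

    # Each value may be a dict, list, str, int, float, or bool, per the union above.
    data_schema: typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue] = {
        "type": "object",
        "properties": {"vendor": {"type": "string"}, "total": {"type": "number"}},
        "required": ["vendor", "total"],
    }

    updated = client.extraction.update_schema(
        schema_id="schema-id",  # placeholder ID
        data_schema=data_schema,
    )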
llama_cloud/resources/organizations/__init__.py
@@ -0,0 +1,2 @@
+ # This file was auto-generated by Fern from our API Definition.
+