llama-cloud 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.
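
The file removed below is the Fern-generated client for the deprecated api/parsing/* endpoints: version 0.0.5 deletes the DeprecatedClient and AsyncDeprecatedClient classes along with their upload, job, result, usage, history, and presigned-URL methods. As a hedged sketch of what 0.0.4-era calling code looked like (the client.deprecated accessor and method names come from the docstrings in the diff; the file path, token, and the job id attribute are placeholders/assumptions):

# Hedged sketch of the removed synchronous client, reconstructed from the
# docstrings below; parameter values are placeholders, not recommendations.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Upload a file and create a parsing job (every parameter is required by this
# generated signature; there are no defaults in the 0.0.4 code).
with open("report.pdf", "rb") as f:
    job = client.deprecated.upload_file(
        language=[],                  # typing.List[ParserLanguages]; empty placeholder
        parsing_instruction="",
        skip_diagonal_text=False,
        invalidate_cache=False,
        do_not_cache=False,
        gpt_4_o_mode=False,
        fast_mode=False,
        gpt_4_o_api_key="",
        do_not_unroll_columns=False,
        page_separator="\n---\n",
        file=f,
    )

# Fetch the job and its markdown result; the `id` attribute on ParsingJob is
# assumed here (the docstring only says the upload returns a job id).
job = client.deprecated.get_job(job_id=job.id)
result = client.deprecated.get_job_result(job_id=job.id)

Each call raises UnprocessableEntityError on a 422 response and ApiError for any other failure, as the error-handling branches in the removed code show.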

@@ -1,961 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import typing
4
- import urllib.parse
5
- from json.decoder import JSONDecodeError
6
-
7
- from ...core.api_error import ApiError
8
- from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
- from ...core.jsonable_encoder import jsonable_encoder
10
- from ...errors.unprocessable_entity_error import UnprocessableEntityError
11
- from ...types.http_validation_error import HttpValidationError
12
- from ...types.llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
13
- from ...types.parser_languages import ParserLanguages
14
- from ...types.parsing_history_item import ParsingHistoryItem
15
- from ...types.parsing_job import ParsingJob
16
- from ...types.parsing_job_json_result import ParsingJobJsonResult
17
- from ...types.parsing_job_markdown_result import ParsingJobMarkdownResult
18
- from ...types.parsing_job_text_result import ParsingJobTextResult
19
- from ...types.parsing_usage import ParsingUsage
20
- from ...types.presigned_url import PresignedUrl
21
-
22
- try:
23
- import pydantic
24
- if pydantic.__version__.startswith("1."):
25
- raise ImportError
26
- import pydantic.v1 as pydantic # type: ignore
27
- except ImportError:
28
- import pydantic # type: ignore
29
-
30
- # this is used as the default value for optional parameters
31
- OMIT = typing.cast(typing.Any, ...)
32
-
33
-
34
- class DeprecatedClient:
35
- def __init__(self, *, client_wrapper: SyncClientWrapper):
36
- self._client_wrapper = client_wrapper
37
-
38
- def get_job_image_result(self, job_id: str, name: str) -> None:
39
- """
40
- Get a job by id
41
-
42
- Parameters:
43
- - job_id: str.
44
-
45
- - name: str.
46
- ---
47
- from llama_cloud.client import LlamaCloud
48
-
49
- client = LlamaCloud(
50
- token="YOUR_TOKEN",
51
- )
52
- client.deprecated.get_job_image_result(
53
- job_id="string",
54
- name="string",
55
- )
56
- """
57
- _response = self._client_wrapper.httpx_client.request(
58
- "GET",
59
- urllib.parse.urljoin(
60
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/image/{name}"
61
- ),
62
- headers=self._client_wrapper.get_headers(),
63
- timeout=60,
64
- )
65
- if 200 <= _response.status_code < 300:
66
- return
67
- if _response.status_code == 422:
68
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
69
- try:
70
- _response_json = _response.json()
71
- except JSONDecodeError:
72
- raise ApiError(status_code=_response.status_code, body=_response.text)
73
- raise ApiError(status_code=_response.status_code, body=_response_json)
74
-
75
- def get_supported_file_extensions(self) -> typing.List[LlamaParseSupportedFileExtensions]:
76
- """
77
- Get a list of supported file extensions
78
-
79
- ---
80
- from llama_cloud.client import LlamaCloud
81
-
82
- client = LlamaCloud(
83
- token="YOUR_TOKEN",
84
- )
85
- client.deprecated.get_supported_file_extensions()
86
- """
87
- _response = self._client_wrapper.httpx_client.request(
88
- "GET",
89
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/supported_file_extensions"),
90
- headers=self._client_wrapper.get_headers(),
91
- timeout=60,
92
- )
93
- if 200 <= _response.status_code < 300:
94
- return pydantic.parse_obj_as(typing.List[LlamaParseSupportedFileExtensions], _response.json()) # type: ignore
95
- try:
96
- _response_json = _response.json()
97
- except JSONDecodeError:
98
- raise ApiError(status_code=_response.status_code, body=_response.text)
99
- raise ApiError(status_code=_response.status_code, body=_response_json)
100
-
101
- def upload_file(
102
- self,
103
- *,
104
- language: typing.List[ParserLanguages],
105
- parsing_instruction: str,
106
- skip_diagonal_text: bool,
107
- invalidate_cache: bool,
108
- do_not_cache: bool,
109
- gpt_4_o_mode: bool,
110
- fast_mode: bool,
111
- gpt_4_o_api_key: str,
112
- do_not_unroll_columns: bool,
113
- page_separator: str,
114
- file: typing.IO,
115
- ) -> ParsingJob:
116
- """
117
- Upload a file to s3 and create a job. return a job id
118
-
119
- Parameters:
120
- - language: typing.List[ParserLanguages].
121
-
122
- - parsing_instruction: str.
123
-
124
- - skip_diagonal_text: bool.
125
-
126
- - invalidate_cache: bool.
127
-
128
- - do_not_cache: bool.
129
-
130
- - gpt_4_o_mode: bool.
131
-
132
- - fast_mode: bool.
133
-
134
- - gpt_4_o_api_key: str.
135
-
136
- - do_not_unroll_columns: bool.
137
-
138
- - page_separator: str.
139
-
140
- - file: typing.IO.
141
- """
142
- _response = self._client_wrapper.httpx_client.request(
143
- "POST",
144
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/upload"),
145
- data=jsonable_encoder(
146
- {
147
- "language": language,
148
- "parsing_instruction": parsing_instruction,
149
- "skip_diagonal_text": skip_diagonal_text,
150
- "invalidate_cache": invalidate_cache,
151
- "do_not_cache": do_not_cache,
152
- "gpt4o_mode": gpt_4_o_mode,
153
- "fast_mode": fast_mode,
154
- "gpt4o_api_key": gpt_4_o_api_key,
155
- "do_not_unroll_columns": do_not_unroll_columns,
156
- "page_separator": page_separator,
157
- }
158
- ),
159
- files={"file": file},
160
- headers=self._client_wrapper.get_headers(),
161
- timeout=60,
162
- )
163
- if 200 <= _response.status_code < 300:
164
- return pydantic.parse_obj_as(ParsingJob, _response.json()) # type: ignore
165
- if _response.status_code == 422:
166
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
167
- try:
168
- _response_json = _response.json()
169
- except JSONDecodeError:
170
- raise ApiError(status_code=_response.status_code, body=_response.text)
171
- raise ApiError(status_code=_response.status_code, body=_response_json)
172
-
173
- def usage(self) -> ParsingUsage:
174
- """
175
- Get parsing usage for user
176
-
177
- ---
178
- from llama_cloud.client import LlamaCloud
179
-
180
- client = LlamaCloud(
181
- token="YOUR_TOKEN",
182
- )
183
- client.deprecated.usage()
184
- """
185
- _response = self._client_wrapper.httpx_client.request(
186
- "GET",
187
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/usage"),
188
- headers=self._client_wrapper.get_headers(),
189
- timeout=60,
190
- )
191
- if 200 <= _response.status_code < 300:
192
- return pydantic.parse_obj_as(ParsingUsage, _response.json()) # type: ignore
193
- if _response.status_code == 422:
194
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
195
- try:
196
- _response_json = _response.json()
197
- except JSONDecodeError:
198
- raise ApiError(status_code=_response.status_code, body=_response.text)
199
- raise ApiError(status_code=_response.status_code, body=_response_json)
200
-
201
- def get_job(self, job_id: str) -> ParsingJob:
202
- """
203
- Get a job by id
204
-
205
- Parameters:
206
- - job_id: str.
207
- ---
208
- from llama_cloud.client import LlamaCloud
209
-
210
- client = LlamaCloud(
211
- token="YOUR_TOKEN",
212
- )
213
- client.deprecated.get_job(
214
- job_id="string",
215
- )
216
- """
217
- _response = self._client_wrapper.httpx_client.request(
218
- "GET",
219
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}"),
220
- headers=self._client_wrapper.get_headers(),
221
- timeout=60,
222
- )
223
- if 200 <= _response.status_code < 300:
224
- return pydantic.parse_obj_as(ParsingJob, _response.json()) # type: ignore
225
- if _response.status_code == 422:
226
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
227
- try:
228
- _response_json = _response.json()
229
- except JSONDecodeError:
230
- raise ApiError(status_code=_response.status_code, body=_response.text)
231
- raise ApiError(status_code=_response.status_code, body=_response_json)
232
-
233
- def get_job_text_result(self, job_id: str) -> ParsingJobTextResult:
234
- """
235
- Get a job by id
236
-
237
- Parameters:
238
- - job_id: str.
239
- ---
240
- from llama_cloud.client import LlamaCloud
241
-
242
- client = LlamaCloud(
243
- token="YOUR_TOKEN",
244
- )
245
- client.deprecated.get_job_text_result(
246
- job_id="string",
247
- )
248
- """
249
- _response = self._client_wrapper.httpx_client.request(
250
- "GET",
251
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/text"),
252
- headers=self._client_wrapper.get_headers(),
253
- timeout=60,
254
- )
255
- if 200 <= _response.status_code < 300:
256
- return pydantic.parse_obj_as(ParsingJobTextResult, _response.json()) # type: ignore
257
- if _response.status_code == 422:
258
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
259
- try:
260
- _response_json = _response.json()
261
- except JSONDecodeError:
262
- raise ApiError(status_code=_response.status_code, body=_response.text)
263
- raise ApiError(status_code=_response.status_code, body=_response_json)
264
-
265
- def get_job_raw_text_result(self, job_id: str) -> typing.Any:
266
- """
267
- Get a job by id
268
-
269
- Parameters:
270
- - job_id: str.
271
- ---
272
- from llama_cloud.client import LlamaCloud
273
-
274
- client = LlamaCloud(
275
- token="YOUR_TOKEN",
276
- )
277
- client.deprecated.get_job_raw_text_result(
278
- job_id="string",
279
- )
280
- """
281
- _response = self._client_wrapper.httpx_client.request(
282
- "GET",
283
- urllib.parse.urljoin(
284
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/text"
285
- ),
286
- headers=self._client_wrapper.get_headers(),
287
- timeout=60,
288
- )
289
- if 200 <= _response.status_code < 300:
290
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
291
- if _response.status_code == 422:
292
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
293
- try:
294
- _response_json = _response.json()
295
- except JSONDecodeError:
296
- raise ApiError(status_code=_response.status_code, body=_response.text)
297
- raise ApiError(status_code=_response.status_code, body=_response_json)
298
-
299
- def get_job_result(self, job_id: str) -> ParsingJobMarkdownResult:
300
- """
301
- Get a job by id
302
-
303
- Parameters:
304
- - job_id: str.
305
- ---
306
- from llama_cloud.client import LlamaCloud
307
-
308
- client = LlamaCloud(
309
- token="YOUR_TOKEN",
310
- )
311
- client.deprecated.get_job_result(
312
- job_id="string",
313
- )
314
- """
315
- _response = self._client_wrapper.httpx_client.request(
316
- "GET",
317
- urllib.parse.urljoin(
318
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/markdown"
319
- ),
320
- headers=self._client_wrapper.get_headers(),
321
- timeout=60,
322
- )
323
- if 200 <= _response.status_code < 300:
324
- return pydantic.parse_obj_as(ParsingJobMarkdownResult, _response.json()) # type: ignore
325
- if _response.status_code == 422:
326
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
327
- try:
328
- _response_json = _response.json()
329
- except JSONDecodeError:
330
- raise ApiError(status_code=_response.status_code, body=_response.text)
331
- raise ApiError(status_code=_response.status_code, body=_response_json)
332
-
333
- def get_job_raw_md_result(self, job_id: str) -> typing.Any:
334
- """
335
- Get a job by id
336
-
337
- Parameters:
338
- - job_id: str.
339
- ---
340
- from llama_cloud.client import LlamaCloud
341
-
342
- client = LlamaCloud(
343
- token="YOUR_TOKEN",
344
- )
345
- client.deprecated.get_job_raw_md_result(
346
- job_id="string",
347
- )
348
- """
349
- _response = self._client_wrapper.httpx_client.request(
350
- "GET",
351
- urllib.parse.urljoin(
352
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/markdown"
353
- ),
354
- headers=self._client_wrapper.get_headers(),
355
- timeout=60,
356
- )
357
- if 200 <= _response.status_code < 300:
358
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
359
- if _response.status_code == 422:
360
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
361
- try:
362
- _response_json = _response.json()
363
- except JSONDecodeError:
364
- raise ApiError(status_code=_response.status_code, body=_response.text)
365
- raise ApiError(status_code=_response.status_code, body=_response_json)
366
-
367
- def get_job_json_result(self, job_id: str) -> ParsingJobJsonResult:
368
- """
369
- Get a job by id
370
-
371
- Parameters:
372
- - job_id: str.
373
- ---
374
- from llama_cloud.client import LlamaCloud
375
-
376
- client = LlamaCloud(
377
- token="YOUR_TOKEN",
378
- )
379
- client.deprecated.get_job_json_result(
380
- job_id="string",
381
- )
382
- """
383
- _response = self._client_wrapper.httpx_client.request(
384
- "GET",
385
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/json"),
386
- headers=self._client_wrapper.get_headers(),
387
- timeout=60,
388
- )
389
- if 200 <= _response.status_code < 300:
390
- return pydantic.parse_obj_as(ParsingJobJsonResult, _response.json()) # type: ignore
391
- if _response.status_code == 422:
392
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
393
- try:
394
- _response_json = _response.json()
395
- except JSONDecodeError:
396
- raise ApiError(status_code=_response.status_code, body=_response.text)
397
- raise ApiError(status_code=_response.status_code, body=_response_json)
398
-
399
- def get_job_json_raw_result(self, job_id: str) -> typing.Any:
400
- """
401
- Get a job by id
402
-
403
- Parameters:
404
- - job_id: str.
405
- ---
406
- from llama_cloud.client import LlamaCloud
407
-
408
- client = LlamaCloud(
409
- token="YOUR_TOKEN",
410
- )
411
- client.deprecated.get_job_json_raw_result(
412
- job_id="string",
413
- )
414
- """
415
- _response = self._client_wrapper.httpx_client.request(
416
- "GET",
417
- urllib.parse.urljoin(
418
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/json"
419
- ),
420
- headers=self._client_wrapper.get_headers(),
421
- timeout=60,
422
- )
423
- if 200 <= _response.status_code < 300:
424
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
425
- if _response.status_code == 422:
426
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
427
- try:
428
- _response_json = _response.json()
429
- except JSONDecodeError:
430
- raise ApiError(status_code=_response.status_code, body=_response.text)
431
- raise ApiError(status_code=_response.status_code, body=_response_json)
432
-
433
- def get_parsing_history_result(self) -> typing.List[ParsingHistoryItem]:
434
- """
435
- Get parsing history for user
436
-
437
- ---
438
- from llama_cloud.client import LlamaCloud
439
-
440
- client = LlamaCloud(
441
- token="YOUR_TOKEN",
442
- )
443
- client.deprecated.get_parsing_history_result()
444
- """
445
- _response = self._client_wrapper.httpx_client.request(
446
- "GET",
447
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/history"),
448
- headers=self._client_wrapper.get_headers(),
449
- timeout=60,
450
- )
451
- if 200 <= _response.status_code < 300:
452
- return pydantic.parse_obj_as(typing.List[ParsingHistoryItem], _response.json()) # type: ignore
453
- if _response.status_code == 422:
454
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
455
- try:
456
- _response_json = _response.json()
457
- except JSONDecodeError:
458
- raise ApiError(status_code=_response.status_code, body=_response.text)
459
- raise ApiError(status_code=_response.status_code, body=_response_json)
460
-
461
- def generate_presigned_url(self, job_id: str, filename: str) -> PresignedUrl:
462
- """
463
- Generate a presigned URL for a job
464
-
465
- Parameters:
466
- - job_id: str.
467
-
468
- - filename: str.
469
- ---
470
- from llama_cloud.client import LlamaCloud
471
-
472
- client = LlamaCloud(
473
- token="YOUR_TOKEN",
474
- )
475
- client.deprecated.generate_presigned_url(
476
- job_id="string",
477
- filename="string",
478
- )
479
- """
480
- _response = self._client_wrapper.httpx_client.request(
481
- "GET",
482
- urllib.parse.urljoin(
483
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/read/{filename}"
484
- ),
485
- headers=self._client_wrapper.get_headers(),
486
- timeout=60,
487
- )
488
- if 200 <= _response.status_code < 300:
489
- return pydantic.parse_obj_as(PresignedUrl, _response.json()) # type: ignore
490
- if _response.status_code == 422:
491
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
492
- try:
493
- _response_json = _response.json()
494
- except JSONDecodeError:
495
- raise ApiError(status_code=_response.status_code, body=_response.text)
496
- raise ApiError(status_code=_response.status_code, body=_response_json)
497
-
498
-
499
- class AsyncDeprecatedClient:
500
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
501
- self._client_wrapper = client_wrapper
502
-
503
- async def get_job_image_result(self, job_id: str, name: str) -> None:
504
- """
505
- Get a job by id
506
-
507
- Parameters:
508
- - job_id: str.
509
-
510
- - name: str.
511
- ---
512
- from llama_cloud.client import AsyncLlamaCloud
513
-
514
- client = AsyncLlamaCloud(
515
- token="YOUR_TOKEN",
516
- )
517
- await client.deprecated.get_job_image_result(
518
- job_id="string",
519
- name="string",
520
- )
521
- """
522
- _response = await self._client_wrapper.httpx_client.request(
523
- "GET",
524
- urllib.parse.urljoin(
525
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/image/{name}"
526
- ),
527
- headers=self._client_wrapper.get_headers(),
528
- timeout=60,
529
- )
530
- if 200 <= _response.status_code < 300:
531
- return
532
- if _response.status_code == 422:
533
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
534
- try:
535
- _response_json = _response.json()
536
- except JSONDecodeError:
537
- raise ApiError(status_code=_response.status_code, body=_response.text)
538
- raise ApiError(status_code=_response.status_code, body=_response_json)
539
-
540
- async def get_supported_file_extensions(self) -> typing.List[LlamaParseSupportedFileExtensions]:
541
- """
542
- Get a list of supported file extensions
543
-
544
- ---
545
- from llama_cloud.client import AsyncLlamaCloud
546
-
547
- client = AsyncLlamaCloud(
548
- token="YOUR_TOKEN",
549
- )
550
- await client.deprecated.get_supported_file_extensions()
551
- """
552
- _response = await self._client_wrapper.httpx_client.request(
553
- "GET",
554
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/supported_file_extensions"),
555
- headers=self._client_wrapper.get_headers(),
556
- timeout=60,
557
- )
558
- if 200 <= _response.status_code < 300:
559
- return pydantic.parse_obj_as(typing.List[LlamaParseSupportedFileExtensions], _response.json()) # type: ignore
560
- try:
561
- _response_json = _response.json()
562
- except JSONDecodeError:
563
- raise ApiError(status_code=_response.status_code, body=_response.text)
564
- raise ApiError(status_code=_response.status_code, body=_response_json)
565
-
566
- async def upload_file(
567
- self,
568
- *,
569
- language: typing.List[ParserLanguages],
570
- parsing_instruction: str,
571
- skip_diagonal_text: bool,
572
- invalidate_cache: bool,
573
- do_not_cache: bool,
574
- gpt_4_o_mode: bool,
575
- fast_mode: bool,
576
- gpt_4_o_api_key: str,
577
- do_not_unroll_columns: bool,
578
- page_separator: str,
579
- file: typing.IO,
580
- ) -> ParsingJob:
581
- """
582
- Upload a file to s3 and create a job. return a job id
583
-
584
- Parameters:
585
- - language: typing.List[ParserLanguages].
586
-
587
- - parsing_instruction: str.
588
-
589
- - skip_diagonal_text: bool.
590
-
591
- - invalidate_cache: bool.
592
-
593
- - do_not_cache: bool.
594
-
595
- - gpt_4_o_mode: bool.
596
-
597
- - fast_mode: bool.
598
-
599
- - gpt_4_o_api_key: str.
600
-
601
- - do_not_unroll_columns: bool.
602
-
603
- - page_separator: str.
604
-
605
- - file: typing.IO.
606
- """
607
- _response = await self._client_wrapper.httpx_client.request(
608
- "POST",
609
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/upload"),
610
- data=jsonable_encoder(
611
- {
612
- "language": language,
613
- "parsing_instruction": parsing_instruction,
614
- "skip_diagonal_text": skip_diagonal_text,
615
- "invalidate_cache": invalidate_cache,
616
- "do_not_cache": do_not_cache,
617
- "gpt4o_mode": gpt_4_o_mode,
618
- "fast_mode": fast_mode,
619
- "gpt4o_api_key": gpt_4_o_api_key,
620
- "do_not_unroll_columns": do_not_unroll_columns,
621
- "page_separator": page_separator,
622
- }
623
- ),
624
- files={"file": file},
625
- headers=self._client_wrapper.get_headers(),
626
- timeout=60,
627
- )
628
- if 200 <= _response.status_code < 300:
629
- return pydantic.parse_obj_as(ParsingJob, _response.json()) # type: ignore
630
- if _response.status_code == 422:
631
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
632
- try:
633
- _response_json = _response.json()
634
- except JSONDecodeError:
635
- raise ApiError(status_code=_response.status_code, body=_response.text)
636
- raise ApiError(status_code=_response.status_code, body=_response_json)
637
-
638
- async def usage(self) -> ParsingUsage:
639
- """
640
- Get parsing usage for user
641
-
642
- ---
643
- from llama_cloud.client import AsyncLlamaCloud
644
-
645
- client = AsyncLlamaCloud(
646
- token="YOUR_TOKEN",
647
- )
648
- await client.deprecated.usage()
649
- """
650
- _response = await self._client_wrapper.httpx_client.request(
651
- "GET",
652
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/usage"),
653
- headers=self._client_wrapper.get_headers(),
654
- timeout=60,
655
- )
656
- if 200 <= _response.status_code < 300:
657
- return pydantic.parse_obj_as(ParsingUsage, _response.json()) # type: ignore
658
- if _response.status_code == 422:
659
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
660
- try:
661
- _response_json = _response.json()
662
- except JSONDecodeError:
663
- raise ApiError(status_code=_response.status_code, body=_response.text)
664
- raise ApiError(status_code=_response.status_code, body=_response_json)
665
-
666
- async def get_job(self, job_id: str) -> ParsingJob:
667
- """
668
- Get a job by id
669
-
670
- Parameters:
671
- - job_id: str.
672
- ---
673
- from llama_cloud.client import AsyncLlamaCloud
674
-
675
- client = AsyncLlamaCloud(
676
- token="YOUR_TOKEN",
677
- )
678
- await client.deprecated.get_job(
679
- job_id="string",
680
- )
681
- """
682
- _response = await self._client_wrapper.httpx_client.request(
683
- "GET",
684
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}"),
685
- headers=self._client_wrapper.get_headers(),
686
- timeout=60,
687
- )
688
- if 200 <= _response.status_code < 300:
689
- return pydantic.parse_obj_as(ParsingJob, _response.json()) # type: ignore
690
- if _response.status_code == 422:
691
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
692
- try:
693
- _response_json = _response.json()
694
- except JSONDecodeError:
695
- raise ApiError(status_code=_response.status_code, body=_response.text)
696
- raise ApiError(status_code=_response.status_code, body=_response_json)
697
-
698
- async def get_job_text_result(self, job_id: str) -> ParsingJobTextResult:
699
- """
700
- Get a job by id
701
-
702
- Parameters:
703
- - job_id: str.
704
- ---
705
- from llama_cloud.client import AsyncLlamaCloud
706
-
707
- client = AsyncLlamaCloud(
708
- token="YOUR_TOKEN",
709
- )
710
- await client.deprecated.get_job_text_result(
711
- job_id="string",
712
- )
713
- """
714
- _response = await self._client_wrapper.httpx_client.request(
715
- "GET",
716
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/text"),
717
- headers=self._client_wrapper.get_headers(),
718
- timeout=60,
719
- )
720
- if 200 <= _response.status_code < 300:
721
- return pydantic.parse_obj_as(ParsingJobTextResult, _response.json()) # type: ignore
722
- if _response.status_code == 422:
723
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
724
- try:
725
- _response_json = _response.json()
726
- except JSONDecodeError:
727
- raise ApiError(status_code=_response.status_code, body=_response.text)
728
- raise ApiError(status_code=_response.status_code, body=_response_json)
729
-
730
- async def get_job_raw_text_result(self, job_id: str) -> typing.Any:
731
- """
732
- Get a job by id
733
-
734
- Parameters:
735
- - job_id: str.
736
- ---
737
- from llama_cloud.client import AsyncLlamaCloud
738
-
739
- client = AsyncLlamaCloud(
740
- token="YOUR_TOKEN",
741
- )
742
- await client.deprecated.get_job_raw_text_result(
743
- job_id="string",
744
- )
745
- """
746
- _response = await self._client_wrapper.httpx_client.request(
747
- "GET",
748
- urllib.parse.urljoin(
749
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/text"
750
- ),
751
- headers=self._client_wrapper.get_headers(),
752
- timeout=60,
753
- )
754
- if 200 <= _response.status_code < 300:
755
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
756
- if _response.status_code == 422:
757
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
758
- try:
759
- _response_json = _response.json()
760
- except JSONDecodeError:
761
- raise ApiError(status_code=_response.status_code, body=_response.text)
762
- raise ApiError(status_code=_response.status_code, body=_response_json)
763
-
764
- async def get_job_result(self, job_id: str) -> ParsingJobMarkdownResult:
765
- """
766
- Get a job by id
767
-
768
- Parameters:
769
- - job_id: str.
770
- ---
771
- from llama_cloud.client import AsyncLlamaCloud
772
-
773
- client = AsyncLlamaCloud(
774
- token="YOUR_TOKEN",
775
- )
776
- await client.deprecated.get_job_result(
777
- job_id="string",
778
- )
779
- """
780
- _response = await self._client_wrapper.httpx_client.request(
781
- "GET",
782
- urllib.parse.urljoin(
783
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/markdown"
784
- ),
785
- headers=self._client_wrapper.get_headers(),
786
- timeout=60,
787
- )
788
- if 200 <= _response.status_code < 300:
789
- return pydantic.parse_obj_as(ParsingJobMarkdownResult, _response.json()) # type: ignore
790
- if _response.status_code == 422:
791
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
792
- try:
793
- _response_json = _response.json()
794
- except JSONDecodeError:
795
- raise ApiError(status_code=_response.status_code, body=_response.text)
796
- raise ApiError(status_code=_response.status_code, body=_response_json)
797
-
798
- async def get_job_raw_md_result(self, job_id: str) -> typing.Any:
799
- """
800
- Get a job by id
801
-
802
- Parameters:
803
- - job_id: str.
804
- ---
805
- from llama_cloud.client import AsyncLlamaCloud
806
-
807
- client = AsyncLlamaCloud(
808
- token="YOUR_TOKEN",
809
- )
810
- await client.deprecated.get_job_raw_md_result(
811
- job_id="string",
812
- )
813
- """
814
- _response = await self._client_wrapper.httpx_client.request(
815
- "GET",
816
- urllib.parse.urljoin(
817
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/markdown"
818
- ),
819
- headers=self._client_wrapper.get_headers(),
820
- timeout=60,
821
- )
822
- if 200 <= _response.status_code < 300:
823
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
824
- if _response.status_code == 422:
825
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
826
- try:
827
- _response_json = _response.json()
828
- except JSONDecodeError:
829
- raise ApiError(status_code=_response.status_code, body=_response.text)
830
- raise ApiError(status_code=_response.status_code, body=_response_json)
831
-
832
- async def get_job_json_result(self, job_id: str) -> ParsingJobJsonResult:
833
- """
834
- Get a job by id
835
-
836
- Parameters:
837
- - job_id: str.
838
- ---
839
- from llama_cloud.client import AsyncLlamaCloud
840
-
841
- client = AsyncLlamaCloud(
842
- token="YOUR_TOKEN",
843
- )
844
- await client.deprecated.get_job_json_result(
845
- job_id="string",
846
- )
847
- """
848
- _response = await self._client_wrapper.httpx_client.request(
849
- "GET",
850
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/json"),
851
- headers=self._client_wrapper.get_headers(),
852
- timeout=60,
853
- )
854
- if 200 <= _response.status_code < 300:
855
- return pydantic.parse_obj_as(ParsingJobJsonResult, _response.json()) # type: ignore
856
- if _response.status_code == 422:
857
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
858
- try:
859
- _response_json = _response.json()
860
- except JSONDecodeError:
861
- raise ApiError(status_code=_response.status_code, body=_response.text)
862
- raise ApiError(status_code=_response.status_code, body=_response_json)
863
-
864
- async def get_job_json_raw_result(self, job_id: str) -> typing.Any:
865
- """
866
- Get a job by id
867
-
868
- Parameters:
869
- - job_id: str.
870
- ---
871
- from llama_cloud.client import AsyncLlamaCloud
872
-
873
- client = AsyncLlamaCloud(
874
- token="YOUR_TOKEN",
875
- )
876
- await client.deprecated.get_job_json_raw_result(
877
- job_id="string",
878
- )
879
- """
880
- _response = await self._client_wrapper.httpx_client.request(
881
- "GET",
882
- urllib.parse.urljoin(
883
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/result/raw/json"
884
- ),
885
- headers=self._client_wrapper.get_headers(),
886
- timeout=60,
887
- )
888
- if 200 <= _response.status_code < 300:
889
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
890
- if _response.status_code == 422:
891
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
892
- try:
893
- _response_json = _response.json()
894
- except JSONDecodeError:
895
- raise ApiError(status_code=_response.status_code, body=_response.text)
896
- raise ApiError(status_code=_response.status_code, body=_response_json)
897
-
898
- async def get_parsing_history_result(self) -> typing.List[ParsingHistoryItem]:
899
- """
900
- Get parsing history for user
901
-
902
- ---
903
- from llama_cloud.client import AsyncLlamaCloud
904
-
905
- client = AsyncLlamaCloud(
906
- token="YOUR_TOKEN",
907
- )
908
- await client.deprecated.get_parsing_history_result()
909
- """
910
- _response = await self._client_wrapper.httpx_client.request(
911
- "GET",
912
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/parsing/history"),
913
- headers=self._client_wrapper.get_headers(),
914
- timeout=60,
915
- )
916
- if 200 <= _response.status_code < 300:
917
- return pydantic.parse_obj_as(typing.List[ParsingHistoryItem], _response.json()) # type: ignore
918
- if _response.status_code == 422:
919
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
920
- try:
921
- _response_json = _response.json()
922
- except JSONDecodeError:
923
- raise ApiError(status_code=_response.status_code, body=_response.text)
924
- raise ApiError(status_code=_response.status_code, body=_response_json)
925
-
926
- async def generate_presigned_url(self, job_id: str, filename: str) -> PresignedUrl:
927
- """
928
- Generate a presigned URL for a job
929
-
930
- Parameters:
931
- - job_id: str.
932
-
933
- - filename: str.
934
- ---
935
- from llama_cloud.client import AsyncLlamaCloud
936
-
937
- client = AsyncLlamaCloud(
938
- token="YOUR_TOKEN",
939
- )
940
- await client.deprecated.generate_presigned_url(
941
- job_id="string",
942
- filename="string",
943
- )
944
- """
945
- _response = await self._client_wrapper.httpx_client.request(
946
- "GET",
947
- urllib.parse.urljoin(
948
- f"{self._client_wrapper.get_base_url()}/", f"api/parsing/job/{job_id}/read/{filename}"
949
- ),
950
- headers=self._client_wrapper.get_headers(),
951
- timeout=60,
952
- )
953
- if 200 <= _response.status_code < 300:
954
- return pydantic.parse_obj_as(PresignedUrl, _response.json()) # type: ignore
955
- if _response.status_code == 422:
956
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
957
- try:
958
- _response_json = _response.json()
959
- except JSONDecodeError:
960
- raise ApiError(status_code=_response.status_code, body=_response.text)
961
- raise ApiError(status_code=_response.status_code, body=_response_json)
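
The second half of the removed file is the async mirror of the same client. A minimal hedged sketch of how it was driven, again assembled from the docstrings above (the job id value is a placeholder):

# Hedged sketch of the removed AsyncDeprecatedClient, mirroring the docstrings
# in the diff above; "job_id" stands in for an id from a prior upload_file call.
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")

    # Account-level endpoints: remaining parsing quota and recent job history.
    usage = await client.deprecated.usage()
    history = await client.deprecated.get_parsing_history_result()

    # Per-job endpoints: job status, markdown result, presigned file URL.
    job = await client.deprecated.get_job(job_id="job_id")
    markdown = await client.deprecated.get_job_result(job_id="job_id")
    url = await client.deprecated.generate_presigned_url(job_id="job_id", filename="report.pdf")


asyncio.run(main())

Code that imports this module or calls these methods through client.deprecated should be reviewed when upgrading, since the file is deleted outright in 0.0.5.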