llama-cloud 0.0.3-py3-none-any.whl → 0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

@@ -41,7 +41,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.get_dataset(
     dataset_id="string",
@@ -76,7 +75,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.update_dataset(
     dataset_id="string",
@@ -111,7 +109,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.delete_dataset(
     dataset_id="string",
@@ -144,7 +141,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.get_questions(
     dataset_id="string",
@@ -182,7 +178,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.create_question(
     dataset_id="string",
@@ -225,7 +220,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.create_questions(
     dataset_id="string",
@@ -262,7 +256,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.get_question(
     question_id="string",
@@ -298,7 +291,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.replace_question(
     question_id="string",
@@ -335,7 +327,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.delete_question(
     question_id="string",
@@ -366,7 +357,6 @@ class EvalsClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.evals.get_supported_models()
 """
@@ -402,7 +392,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.get_dataset(
     dataset_id="string",
@@ -437,7 +426,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.update_dataset(
     dataset_id="string",
@@ -472,7 +460,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.delete_dataset(
     dataset_id="string",
@@ -505,7 +492,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.get_questions(
     dataset_id="string",
@@ -543,7 +529,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.create_question(
     dataset_id="string",
@@ -586,7 +571,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.create_questions(
     dataset_id="string",
@@ -623,7 +607,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.get_question(
     question_id="string",
@@ -659,7 +642,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.replace_question(
     question_id="string",
@@ -696,7 +678,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.delete_question(
     question_id="string",
@@ -727,7 +708,6 @@ class AsyncEvalsClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.evals.get_supported_models()
 """
@@ -44,7 +44,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.read_file(
     id="string",
@@ -80,7 +79,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.delete_file(
     id="string",
@@ -114,7 +112,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.read_files()
 """
@@ -193,7 +190,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.generate_presigned_url(
     name="string",
@@ -237,7 +233,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.sync_files()
 """
@@ -271,7 +266,6 @@ class FilesClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.files.read_file_content(
     id="string",
@@ -312,7 +306,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.read_file(
     id="string",
@@ -348,7 +341,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.delete_file(
     id="string",
@@ -382,7 +374,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.read_files()
 """
@@ -461,7 +452,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.generate_presigned_url(
     name="string",
@@ -505,7 +495,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.sync_files()
 """
@@ -539,7 +528,6 @@ class AsyncFilesClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.files.read_file_content(
     id="string",
@@ -48,7 +48,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_image_result(
     job_id="string",
@@ -82,7 +81,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_supported_file_extensions()
 """
@@ -181,7 +179,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.usage()
 """
@@ -212,7 +209,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job(
     job_id="string",
@@ -245,7 +241,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_text_result(
     job_id="string",
@@ -278,7 +273,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_raw_text_result(
     job_id="string",
@@ -313,7 +307,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_result(
     job_id="string",
@@ -348,7 +341,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_raw_md_result(
     job_id="string",
@@ -383,7 +375,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_json_result(
     job_id="string",
@@ -416,7 +407,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_job_json_raw_result(
     job_id="string",
@@ -449,7 +439,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.get_parsing_history_result()
 """
@@ -482,7 +471,6 @@ class ParsingClient:
 
 client = LlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 client.parsing.generate_presigned_url(
     job_id="string",
@@ -525,7 +513,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_image_result(
     job_id="string",
@@ -559,7 +546,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_supported_file_extensions()
 """
@@ -658,7 +644,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.usage()
 """
@@ -689,7 +674,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job(
     job_id="string",
@@ -722,7 +706,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_text_result(
     job_id="string",
@@ -755,7 +738,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_raw_text_result(
     job_id="string",
@@ -790,7 +772,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_result(
     job_id="string",
@@ -825,7 +806,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_raw_md_result(
     job_id="string",
@@ -860,7 +840,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_json_result(
     job_id="string",
@@ -893,7 +872,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_job_json_raw_result(
     job_id="string",
@@ -926,7 +904,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.get_parsing_history_result()
 """
@@ -959,7 +936,6 @@ class AsyncParsingClient:
 
 client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
-    base_url="https://yourhost.com/path/to/api",
 )
 await client.parsing.generate_presigned_url(
     job_id="string",