llama-cloud 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud has been flagged as potentially problematic; consult the package registry's advisory page for details.
- llama_cloud/client.py +2 -2
- llama_cloud/resources/api_keys/client.py +16 -16
- llama_cloud/resources/billing/client.py +12 -12
- llama_cloud/resources/component_definitions/client.py +12 -12
- llama_cloud/resources/data_sinks/client.py +24 -24
- llama_cloud/resources/data_sources/client.py +24 -24
- llama_cloud/resources/deprecated/client.py +48 -48
- llama_cloud/resources/evals/client.py +40 -40
- llama_cloud/resources/files/client.py +24 -24
- llama_cloud/resources/parsing/client.py +48 -48
- llama_cloud/resources/pipelines/client.py +223 -152
- llama_cloud/resources/projects/client.py +64 -64
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/RECORD +16 -16
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/WHEEL +0 -0
|
@@ -41,9 +41,9 @@ class DeprecatedClient:
|
|
|
41
41
|
|
|
42
42
|
- name: str.
|
|
43
43
|
---
|
|
44
|
-
from platform.client import
|
|
44
|
+
from platform.client import LlamaCloud
|
|
45
45
|
|
|
46
|
-
client =
|
|
46
|
+
client = LlamaCloud(
|
|
47
47
|
token="YOUR_TOKEN",
|
|
48
48
|
base_url="https://yourhost.com/path/to/api",
|
|
49
49
|
)
|
|
@@ -75,9 +75,9 @@ class DeprecatedClient:
|
|
|
75
75
|
Get a list of supported file extensions
|
|
76
76
|
|
|
77
77
|
---
|
|
78
|
-
from platform.client import
|
|
78
|
+
from platform.client import LlamaCloud
|
|
79
79
|
|
|
80
|
-
client =
|
|
80
|
+
client = LlamaCloud(
|
|
81
81
|
token="YOUR_TOKEN",
|
|
82
82
|
base_url="https://yourhost.com/path/to/api",
|
|
83
83
|
)
|
|
@@ -174,9 +174,9 @@ class DeprecatedClient:
|
|
|
174
174
|
Get parsing usage for user
|
|
175
175
|
|
|
176
176
|
---
|
|
177
|
-
from platform.client import
|
|
177
|
+
from platform.client import LlamaCloud
|
|
178
178
|
|
|
179
|
-
client =
|
|
179
|
+
client = LlamaCloud(
|
|
180
180
|
token="YOUR_TOKEN",
|
|
181
181
|
base_url="https://yourhost.com/path/to/api",
|
|
182
182
|
)
|
|
@@ -205,9 +205,9 @@ class DeprecatedClient:
|
|
|
205
205
|
Parameters:
|
|
206
206
|
- job_id: str.
|
|
207
207
|
---
|
|
208
|
-
from platform.client import
|
|
208
|
+
from platform.client import LlamaCloud
|
|
209
209
|
|
|
210
|
-
client =
|
|
210
|
+
client = LlamaCloud(
|
|
211
211
|
token="YOUR_TOKEN",
|
|
212
212
|
base_url="https://yourhost.com/path/to/api",
|
|
213
213
|
)
|
|
@@ -238,9 +238,9 @@ class DeprecatedClient:
|
|
|
238
238
|
Parameters:
|
|
239
239
|
- job_id: str.
|
|
240
240
|
---
|
|
241
|
-
from platform.client import
|
|
241
|
+
from platform.client import LlamaCloud
|
|
242
242
|
|
|
243
|
-
client =
|
|
243
|
+
client = LlamaCloud(
|
|
244
244
|
token="YOUR_TOKEN",
|
|
245
245
|
base_url="https://yourhost.com/path/to/api",
|
|
246
246
|
)
|
|
@@ -271,9 +271,9 @@ class DeprecatedClient:
|
|
|
271
271
|
Parameters:
|
|
272
272
|
- job_id: str.
|
|
273
273
|
---
|
|
274
|
-
from platform.client import
|
|
274
|
+
from platform.client import LlamaCloud
|
|
275
275
|
|
|
276
|
-
client =
|
|
276
|
+
client = LlamaCloud(
|
|
277
277
|
token="YOUR_TOKEN",
|
|
278
278
|
base_url="https://yourhost.com/path/to/api",
|
|
279
279
|
)
|
|
@@ -306,9 +306,9 @@ class DeprecatedClient:
|
|
|
306
306
|
Parameters:
|
|
307
307
|
- job_id: str.
|
|
308
308
|
---
|
|
309
|
-
from platform.client import
|
|
309
|
+
from platform.client import LlamaCloud
|
|
310
310
|
|
|
311
|
-
client =
|
|
311
|
+
client = LlamaCloud(
|
|
312
312
|
token="YOUR_TOKEN",
|
|
313
313
|
base_url="https://yourhost.com/path/to/api",
|
|
314
314
|
)
|
|
@@ -341,9 +341,9 @@ class DeprecatedClient:
|
|
|
341
341
|
Parameters:
|
|
342
342
|
- job_id: str.
|
|
343
343
|
---
|
|
344
|
-
from platform.client import
|
|
344
|
+
from platform.client import LlamaCloud
|
|
345
345
|
|
|
346
|
-
client =
|
|
346
|
+
client = LlamaCloud(
|
|
347
347
|
token="YOUR_TOKEN",
|
|
348
348
|
base_url="https://yourhost.com/path/to/api",
|
|
349
349
|
)
|
|
@@ -376,9 +376,9 @@ class DeprecatedClient:
|
|
|
376
376
|
Parameters:
|
|
377
377
|
- job_id: str.
|
|
378
378
|
---
|
|
379
|
-
from platform.client import
|
|
379
|
+
from platform.client import LlamaCloud
|
|
380
380
|
|
|
381
|
-
client =
|
|
381
|
+
client = LlamaCloud(
|
|
382
382
|
token="YOUR_TOKEN",
|
|
383
383
|
base_url="https://yourhost.com/path/to/api",
|
|
384
384
|
)
|
|
@@ -409,9 +409,9 @@ class DeprecatedClient:
|
|
|
409
409
|
Parameters:
|
|
410
410
|
- job_id: str.
|
|
411
411
|
---
|
|
412
|
-
from platform.client import
|
|
412
|
+
from platform.client import LlamaCloud
|
|
413
413
|
|
|
414
|
-
client =
|
|
414
|
+
client = LlamaCloud(
|
|
415
415
|
token="YOUR_TOKEN",
|
|
416
416
|
base_url="https://yourhost.com/path/to/api",
|
|
417
417
|
)
|
|
@@ -442,9 +442,9 @@ class DeprecatedClient:
|
|
|
442
442
|
Get parsing history for user
|
|
443
443
|
|
|
444
444
|
---
|
|
445
|
-
from platform.client import
|
|
445
|
+
from platform.client import LlamaCloud
|
|
446
446
|
|
|
447
|
-
client =
|
|
447
|
+
client = LlamaCloud(
|
|
448
448
|
token="YOUR_TOKEN",
|
|
449
449
|
base_url="https://yourhost.com/path/to/api",
|
|
450
450
|
)
|
|
@@ -475,9 +475,9 @@ class DeprecatedClient:
|
|
|
475
475
|
|
|
476
476
|
- filename: str.
|
|
477
477
|
---
|
|
478
|
-
from platform.client import
|
|
478
|
+
from platform.client import LlamaCloud
|
|
479
479
|
|
|
480
|
-
client =
|
|
480
|
+
client = LlamaCloud(
|
|
481
481
|
token="YOUR_TOKEN",
|
|
482
482
|
base_url="https://yourhost.com/path/to/api",
|
|
483
483
|
)
|
|
@@ -518,9 +518,9 @@ class AsyncDeprecatedClient:
|
|
|
518
518
|
|
|
519
519
|
- name: str.
|
|
520
520
|
---
|
|
521
|
-
from platform.client import
|
|
521
|
+
from platform.client import AsyncLlamaCloud
|
|
522
522
|
|
|
523
|
-
client =
|
|
523
|
+
client = AsyncLlamaCloud(
|
|
524
524
|
token="YOUR_TOKEN",
|
|
525
525
|
base_url="https://yourhost.com/path/to/api",
|
|
526
526
|
)
|
|
@@ -552,9 +552,9 @@ class AsyncDeprecatedClient:
|
|
|
552
552
|
Get a list of supported file extensions
|
|
553
553
|
|
|
554
554
|
---
|
|
555
|
-
from platform.client import
|
|
555
|
+
from platform.client import AsyncLlamaCloud
|
|
556
556
|
|
|
557
|
-
client =
|
|
557
|
+
client = AsyncLlamaCloud(
|
|
558
558
|
token="YOUR_TOKEN",
|
|
559
559
|
base_url="https://yourhost.com/path/to/api",
|
|
560
560
|
)
|
|
@@ -651,9 +651,9 @@ class AsyncDeprecatedClient:
|
|
|
651
651
|
Get parsing usage for user
|
|
652
652
|
|
|
653
653
|
---
|
|
654
|
-
from platform.client import
|
|
654
|
+
from platform.client import AsyncLlamaCloud
|
|
655
655
|
|
|
656
|
-
client =
|
|
656
|
+
client = AsyncLlamaCloud(
|
|
657
657
|
token="YOUR_TOKEN",
|
|
658
658
|
base_url="https://yourhost.com/path/to/api",
|
|
659
659
|
)
|
|
@@ -682,9 +682,9 @@ class AsyncDeprecatedClient:
|
|
|
682
682
|
Parameters:
|
|
683
683
|
- job_id: str.
|
|
684
684
|
---
|
|
685
|
-
from platform.client import
|
|
685
|
+
from platform.client import AsyncLlamaCloud
|
|
686
686
|
|
|
687
|
-
client =
|
|
687
|
+
client = AsyncLlamaCloud(
|
|
688
688
|
token="YOUR_TOKEN",
|
|
689
689
|
base_url="https://yourhost.com/path/to/api",
|
|
690
690
|
)
|
|
@@ -715,9 +715,9 @@ class AsyncDeprecatedClient:
|
|
|
715
715
|
Parameters:
|
|
716
716
|
- job_id: str.
|
|
717
717
|
---
|
|
718
|
-
from platform.client import
|
|
718
|
+
from platform.client import AsyncLlamaCloud
|
|
719
719
|
|
|
720
|
-
client =
|
|
720
|
+
client = AsyncLlamaCloud(
|
|
721
721
|
token="YOUR_TOKEN",
|
|
722
722
|
base_url="https://yourhost.com/path/to/api",
|
|
723
723
|
)
|
|
@@ -748,9 +748,9 @@ class AsyncDeprecatedClient:
|
|
|
748
748
|
Parameters:
|
|
749
749
|
- job_id: str.
|
|
750
750
|
---
|
|
751
|
-
from platform.client import
|
|
751
|
+
from platform.client import AsyncLlamaCloud
|
|
752
752
|
|
|
753
|
-
client =
|
|
753
|
+
client = AsyncLlamaCloud(
|
|
754
754
|
token="YOUR_TOKEN",
|
|
755
755
|
base_url="https://yourhost.com/path/to/api",
|
|
756
756
|
)
|
|
@@ -783,9 +783,9 @@ class AsyncDeprecatedClient:
|
|
|
783
783
|
Parameters:
|
|
784
784
|
- job_id: str.
|
|
785
785
|
---
|
|
786
|
-
from platform.client import
|
|
786
|
+
from platform.client import AsyncLlamaCloud
|
|
787
787
|
|
|
788
|
-
client =
|
|
788
|
+
client = AsyncLlamaCloud(
|
|
789
789
|
token="YOUR_TOKEN",
|
|
790
790
|
base_url="https://yourhost.com/path/to/api",
|
|
791
791
|
)
|
|
@@ -818,9 +818,9 @@ class AsyncDeprecatedClient:
|
|
|
818
818
|
Parameters:
|
|
819
819
|
- job_id: str.
|
|
820
820
|
---
|
|
821
|
-
from platform.client import
|
|
821
|
+
from platform.client import AsyncLlamaCloud
|
|
822
822
|
|
|
823
|
-
client =
|
|
823
|
+
client = AsyncLlamaCloud(
|
|
824
824
|
token="YOUR_TOKEN",
|
|
825
825
|
base_url="https://yourhost.com/path/to/api",
|
|
826
826
|
)
|
|
@@ -853,9 +853,9 @@ class AsyncDeprecatedClient:
|
|
|
853
853
|
Parameters:
|
|
854
854
|
- job_id: str.
|
|
855
855
|
---
|
|
856
|
-
from platform.client import
|
|
856
|
+
from platform.client import AsyncLlamaCloud
|
|
857
857
|
|
|
858
|
-
client =
|
|
858
|
+
client = AsyncLlamaCloud(
|
|
859
859
|
token="YOUR_TOKEN",
|
|
860
860
|
base_url="https://yourhost.com/path/to/api",
|
|
861
861
|
)
|
|
@@ -886,9 +886,9 @@ class AsyncDeprecatedClient:
|
|
|
886
886
|
Parameters:
|
|
887
887
|
- job_id: str.
|
|
888
888
|
---
|
|
889
|
-
from platform.client import
|
|
889
|
+
from platform.client import AsyncLlamaCloud
|
|
890
890
|
|
|
891
|
-
client =
|
|
891
|
+
client = AsyncLlamaCloud(
|
|
892
892
|
token="YOUR_TOKEN",
|
|
893
893
|
base_url="https://yourhost.com/path/to/api",
|
|
894
894
|
)
|
|
@@ -919,9 +919,9 @@ class AsyncDeprecatedClient:
|
|
|
919
919
|
Get parsing history for user
|
|
920
920
|
|
|
921
921
|
---
|
|
922
|
-
from platform.client import
|
|
922
|
+
from platform.client import AsyncLlamaCloud
|
|
923
923
|
|
|
924
|
-
client =
|
|
924
|
+
client = AsyncLlamaCloud(
|
|
925
925
|
token="YOUR_TOKEN",
|
|
926
926
|
base_url="https://yourhost.com/path/to/api",
|
|
927
927
|
)
|
|
@@ -952,9 +952,9 @@ class AsyncDeprecatedClient:
|
|
|
952
952
|
|
|
953
953
|
- filename: str.
|
|
954
954
|
---
|
|
955
|
-
from platform.client import
|
|
955
|
+
from platform.client import AsyncLlamaCloud
|
|
956
956
|
|
|
957
|
-
client =
|
|
957
|
+
client = AsyncLlamaCloud(
|
|
958
958
|
token="YOUR_TOKEN",
|
|
959
959
|
base_url="https://yourhost.com/path/to/api",
|
|
960
960
|
)
|
|
@@ -34,9 +34,9 @@ class EvalsClient:
|
|
|
34
34
|
Parameters:
|
|
35
35
|
- dataset_id: str.
|
|
36
36
|
---
|
|
37
|
-
from platform.client import
|
|
37
|
+
from platform.client import LlamaCloud
|
|
38
38
|
|
|
39
|
-
client =
|
|
39
|
+
client = LlamaCloud(
|
|
40
40
|
token="YOUR_TOKEN",
|
|
41
41
|
base_url="https://yourhost.com/path/to/api",
|
|
42
42
|
)
|
|
@@ -69,9 +69,9 @@ class EvalsClient:
|
|
|
69
69
|
|
|
70
70
|
- name: str. The name of the EvalDataset.
|
|
71
71
|
---
|
|
72
|
-
from platform.client import
|
|
72
|
+
from platform.client import LlamaCloud
|
|
73
73
|
|
|
74
|
-
client =
|
|
74
|
+
client = LlamaCloud(
|
|
75
75
|
token="YOUR_TOKEN",
|
|
76
76
|
base_url="https://yourhost.com/path/to/api",
|
|
77
77
|
)
|
|
@@ -104,9 +104,9 @@ class EvalsClient:
|
|
|
104
104
|
Parameters:
|
|
105
105
|
- dataset_id: str.
|
|
106
106
|
---
|
|
107
|
-
from platform.client import
|
|
107
|
+
from platform.client import LlamaCloud
|
|
108
108
|
|
|
109
|
-
client =
|
|
109
|
+
client = LlamaCloud(
|
|
110
110
|
token="YOUR_TOKEN",
|
|
111
111
|
base_url="https://yourhost.com/path/to/api",
|
|
112
112
|
)
|
|
@@ -137,9 +137,9 @@ class EvalsClient:
|
|
|
137
137
|
Parameters:
|
|
138
138
|
- dataset_id: str.
|
|
139
139
|
---
|
|
140
|
-
from platform.client import
|
|
140
|
+
from platform.client import LlamaCloud
|
|
141
141
|
|
|
142
|
-
client =
|
|
142
|
+
client = LlamaCloud(
|
|
143
143
|
token="YOUR_TOKEN",
|
|
144
144
|
base_url="https://yourhost.com/path/to/api",
|
|
145
145
|
)
|
|
@@ -175,9 +175,9 @@ class EvalsClient:
|
|
|
175
175
|
- request: EvalQuestionCreate.
|
|
176
176
|
---
|
|
177
177
|
from platform import EvalQuestionCreate
|
|
178
|
-
from platform.client import
|
|
178
|
+
from platform.client import LlamaCloud
|
|
179
179
|
|
|
180
|
-
client =
|
|
180
|
+
client = LlamaCloud(
|
|
181
181
|
token="YOUR_TOKEN",
|
|
182
182
|
base_url="https://yourhost.com/path/to/api",
|
|
183
183
|
)
|
|
@@ -218,9 +218,9 @@ class EvalsClient:
|
|
|
218
218
|
|
|
219
219
|
- request: typing.List[EvalQuestionCreate].
|
|
220
220
|
---
|
|
221
|
-
from platform.client import
|
|
221
|
+
from platform.client import LlamaCloud
|
|
222
222
|
|
|
223
|
-
client =
|
|
223
|
+
client = LlamaCloud(
|
|
224
224
|
token="YOUR_TOKEN",
|
|
225
225
|
base_url="https://yourhost.com/path/to/api",
|
|
226
226
|
)
|
|
@@ -255,9 +255,9 @@ class EvalsClient:
|
|
|
255
255
|
Parameters:
|
|
256
256
|
- question_id: str.
|
|
257
257
|
---
|
|
258
|
-
from platform.client import
|
|
258
|
+
from platform.client import LlamaCloud
|
|
259
259
|
|
|
260
|
-
client =
|
|
260
|
+
client = LlamaCloud(
|
|
261
261
|
token="YOUR_TOKEN",
|
|
262
262
|
base_url="https://yourhost.com/path/to/api",
|
|
263
263
|
)
|
|
@@ -291,9 +291,9 @@ class EvalsClient:
|
|
|
291
291
|
- request: EvalQuestionCreate.
|
|
292
292
|
---
|
|
293
293
|
from platform import EvalQuestionCreate
|
|
294
|
-
from platform.client import
|
|
294
|
+
from platform.client import LlamaCloud
|
|
295
295
|
|
|
296
|
-
client =
|
|
296
|
+
client = LlamaCloud(
|
|
297
297
|
token="YOUR_TOKEN",
|
|
298
298
|
base_url="https://yourhost.com/path/to/api",
|
|
299
299
|
)
|
|
@@ -328,9 +328,9 @@ class EvalsClient:
|
|
|
328
328
|
Parameters:
|
|
329
329
|
- question_id: str.
|
|
330
330
|
---
|
|
331
|
-
from platform.client import
|
|
331
|
+
from platform.client import LlamaCloud
|
|
332
332
|
|
|
333
|
-
client =
|
|
333
|
+
client = LlamaCloud(
|
|
334
334
|
token="YOUR_TOKEN",
|
|
335
335
|
base_url="https://yourhost.com/path/to/api",
|
|
336
336
|
)
|
|
@@ -359,9 +359,9 @@ class EvalsClient:
|
|
|
359
359
|
Get all supported models.
|
|
360
360
|
|
|
361
361
|
---
|
|
362
|
-
from platform.client import
|
|
362
|
+
from platform.client import LlamaCloud
|
|
363
363
|
|
|
364
|
-
client =
|
|
364
|
+
client = LlamaCloud(
|
|
365
365
|
token="YOUR_TOKEN",
|
|
366
366
|
base_url="https://yourhost.com/path/to/api",
|
|
367
367
|
)
|
|
@@ -395,9 +395,9 @@ class AsyncEvalsClient:
|
|
|
395
395
|
Parameters:
|
|
396
396
|
- dataset_id: str.
|
|
397
397
|
---
|
|
398
|
-
from platform.client import
|
|
398
|
+
from platform.client import AsyncLlamaCloud
|
|
399
399
|
|
|
400
|
-
client =
|
|
400
|
+
client = AsyncLlamaCloud(
|
|
401
401
|
token="YOUR_TOKEN",
|
|
402
402
|
base_url="https://yourhost.com/path/to/api",
|
|
403
403
|
)
|
|
@@ -430,9 +430,9 @@ class AsyncEvalsClient:
|
|
|
430
430
|
|
|
431
431
|
- name: str. The name of the EvalDataset.
|
|
432
432
|
---
|
|
433
|
-
from platform.client import
|
|
433
|
+
from platform.client import AsyncLlamaCloud
|
|
434
434
|
|
|
435
|
-
client =
|
|
435
|
+
client = AsyncLlamaCloud(
|
|
436
436
|
token="YOUR_TOKEN",
|
|
437
437
|
base_url="https://yourhost.com/path/to/api",
|
|
438
438
|
)
|
|
@@ -465,9 +465,9 @@ class AsyncEvalsClient:
|
|
|
465
465
|
Parameters:
|
|
466
466
|
- dataset_id: str.
|
|
467
467
|
---
|
|
468
|
-
from platform.client import
|
|
468
|
+
from platform.client import AsyncLlamaCloud
|
|
469
469
|
|
|
470
|
-
client =
|
|
470
|
+
client = AsyncLlamaCloud(
|
|
471
471
|
token="YOUR_TOKEN",
|
|
472
472
|
base_url="https://yourhost.com/path/to/api",
|
|
473
473
|
)
|
|
@@ -498,9 +498,9 @@ class AsyncEvalsClient:
|
|
|
498
498
|
Parameters:
|
|
499
499
|
- dataset_id: str.
|
|
500
500
|
---
|
|
501
|
-
from platform.client import
|
|
501
|
+
from platform.client import AsyncLlamaCloud
|
|
502
502
|
|
|
503
|
-
client =
|
|
503
|
+
client = AsyncLlamaCloud(
|
|
504
504
|
token="YOUR_TOKEN",
|
|
505
505
|
base_url="https://yourhost.com/path/to/api",
|
|
506
506
|
)
|
|
@@ -536,9 +536,9 @@ class AsyncEvalsClient:
|
|
|
536
536
|
- request: EvalQuestionCreate.
|
|
537
537
|
---
|
|
538
538
|
from platform import EvalQuestionCreate
|
|
539
|
-
from platform.client import
|
|
539
|
+
from platform.client import AsyncLlamaCloud
|
|
540
540
|
|
|
541
|
-
client =
|
|
541
|
+
client = AsyncLlamaCloud(
|
|
542
542
|
token="YOUR_TOKEN",
|
|
543
543
|
base_url="https://yourhost.com/path/to/api",
|
|
544
544
|
)
|
|
@@ -579,9 +579,9 @@ class AsyncEvalsClient:
|
|
|
579
579
|
|
|
580
580
|
- request: typing.List[EvalQuestionCreate].
|
|
581
581
|
---
|
|
582
|
-
from platform.client import
|
|
582
|
+
from platform.client import AsyncLlamaCloud
|
|
583
583
|
|
|
584
|
-
client =
|
|
584
|
+
client = AsyncLlamaCloud(
|
|
585
585
|
token="YOUR_TOKEN",
|
|
586
586
|
base_url="https://yourhost.com/path/to/api",
|
|
587
587
|
)
|
|
@@ -616,9 +616,9 @@ class AsyncEvalsClient:
|
|
|
616
616
|
Parameters:
|
|
617
617
|
- question_id: str.
|
|
618
618
|
---
|
|
619
|
-
from platform.client import
|
|
619
|
+
from platform.client import AsyncLlamaCloud
|
|
620
620
|
|
|
621
|
-
client =
|
|
621
|
+
client = AsyncLlamaCloud(
|
|
622
622
|
token="YOUR_TOKEN",
|
|
623
623
|
base_url="https://yourhost.com/path/to/api",
|
|
624
624
|
)
|
|
@@ -652,9 +652,9 @@ class AsyncEvalsClient:
|
|
|
652
652
|
- request: EvalQuestionCreate.
|
|
653
653
|
---
|
|
654
654
|
from platform import EvalQuestionCreate
|
|
655
|
-
from platform.client import
|
|
655
|
+
from platform.client import AsyncLlamaCloud
|
|
656
656
|
|
|
657
|
-
client =
|
|
657
|
+
client = AsyncLlamaCloud(
|
|
658
658
|
token="YOUR_TOKEN",
|
|
659
659
|
base_url="https://yourhost.com/path/to/api",
|
|
660
660
|
)
|
|
@@ -689,9 +689,9 @@ class AsyncEvalsClient:
|
|
|
689
689
|
Parameters:
|
|
690
690
|
- question_id: str.
|
|
691
691
|
---
|
|
692
|
-
from platform.client import
|
|
692
|
+
from platform.client import AsyncLlamaCloud
|
|
693
693
|
|
|
694
|
-
client =
|
|
694
|
+
client = AsyncLlamaCloud(
|
|
695
695
|
token="YOUR_TOKEN",
|
|
696
696
|
base_url="https://yourhost.com/path/to/api",
|
|
697
697
|
)
|
|
@@ -720,9 +720,9 @@ class AsyncEvalsClient:
|
|
|
720
720
|
Get all supported models.
|
|
721
721
|
|
|
722
722
|
---
|
|
723
|
-
from platform.client import
|
|
723
|
+
from platform.client import AsyncLlamaCloud
|
|
724
724
|
|
|
725
|
-
client =
|
|
725
|
+
client = AsyncLlamaCloud(
|
|
726
726
|
token="YOUR_TOKEN",
|
|
727
727
|
base_url="https://yourhost.com/path/to/api",
|
|
728
728
|
)
|
|
@@ -37,9 +37,9 @@ class FilesClient:
|
|
|
37
37
|
|
|
38
38
|
- project_id: typing.Optional[str].
|
|
39
39
|
---
|
|
40
|
-
from platform.client import
|
|
40
|
+
from platform.client import LlamaCloud
|
|
41
41
|
|
|
42
|
-
client =
|
|
42
|
+
client = LlamaCloud(
|
|
43
43
|
token="YOUR_TOKEN",
|
|
44
44
|
base_url="https://yourhost.com/path/to/api",
|
|
45
45
|
)
|
|
@@ -73,9 +73,9 @@ class FilesClient:
|
|
|
73
73
|
|
|
74
74
|
- project_id: typing.Optional[str].
|
|
75
75
|
---
|
|
76
|
-
from platform.client import
|
|
76
|
+
from platform.client import LlamaCloud
|
|
77
77
|
|
|
78
|
-
client =
|
|
78
|
+
client = LlamaCloud(
|
|
79
79
|
token="YOUR_TOKEN",
|
|
80
80
|
base_url="https://yourhost.com/path/to/api",
|
|
81
81
|
)
|
|
@@ -107,9 +107,9 @@ class FilesClient:
|
|
|
107
107
|
Parameters:
|
|
108
108
|
- project_id: typing.Optional[str].
|
|
109
109
|
---
|
|
110
|
-
from platform.client import
|
|
110
|
+
from platform.client import LlamaCloud
|
|
111
111
|
|
|
112
|
-
client =
|
|
112
|
+
client = LlamaCloud(
|
|
113
113
|
token="YOUR_TOKEN",
|
|
114
114
|
base_url="https://yourhost.com/path/to/api",
|
|
115
115
|
)
|
|
@@ -186,9 +186,9 @@ class FilesClient:
|
|
|
186
186
|
|
|
187
187
|
- data_source_id: typing.Optional[str]. The ID of the data source that the file belongs to
|
|
188
188
|
---
|
|
189
|
-
from platform.client import
|
|
189
|
+
from platform.client import LlamaCloud
|
|
190
190
|
|
|
191
|
-
client =
|
|
191
|
+
client = LlamaCloud(
|
|
192
192
|
token="YOUR_TOKEN",
|
|
193
193
|
base_url="https://yourhost.com/path/to/api",
|
|
194
194
|
)
|
|
@@ -230,9 +230,9 @@ class FilesClient:
|
|
|
230
230
|
Parameters:
|
|
231
231
|
- project_id: typing.Optional[str].
|
|
232
232
|
---
|
|
233
|
-
from platform.client import
|
|
233
|
+
from platform.client import LlamaCloud
|
|
234
234
|
|
|
235
|
-
client =
|
|
235
|
+
client = LlamaCloud(
|
|
236
236
|
token="YOUR_TOKEN",
|
|
237
237
|
base_url="https://yourhost.com/path/to/api",
|
|
238
238
|
)
|
|
@@ -264,9 +264,9 @@ class FilesClient:
|
|
|
264
264
|
|
|
265
265
|
- project_id: typing.Optional[str].
|
|
266
266
|
---
|
|
267
|
-
from platform.client import
|
|
267
|
+
from platform.client import LlamaCloud
|
|
268
268
|
|
|
269
|
-
client =
|
|
269
|
+
client = LlamaCloud(
|
|
270
270
|
token="YOUR_TOKEN",
|
|
271
271
|
base_url="https://yourhost.com/path/to/api",
|
|
272
272
|
)
|
|
@@ -305,9 +305,9 @@ class AsyncFilesClient:
|
|
|
305
305
|
|
|
306
306
|
- project_id: typing.Optional[str].
|
|
307
307
|
---
|
|
308
|
-
from platform.client import
|
|
308
|
+
from platform.client import AsyncLlamaCloud
|
|
309
309
|
|
|
310
|
-
client =
|
|
310
|
+
client = AsyncLlamaCloud(
|
|
311
311
|
token="YOUR_TOKEN",
|
|
312
312
|
base_url="https://yourhost.com/path/to/api",
|
|
313
313
|
)
|
|
@@ -341,9 +341,9 @@ class AsyncFilesClient:
|
|
|
341
341
|
|
|
342
342
|
- project_id: typing.Optional[str].
|
|
343
343
|
---
|
|
344
|
-
from platform.client import
|
|
344
|
+
from platform.client import AsyncLlamaCloud
|
|
345
345
|
|
|
346
|
-
client =
|
|
346
|
+
client = AsyncLlamaCloud(
|
|
347
347
|
token="YOUR_TOKEN",
|
|
348
348
|
base_url="https://yourhost.com/path/to/api",
|
|
349
349
|
)
|
|
@@ -375,9 +375,9 @@ class AsyncFilesClient:
|
|
|
375
375
|
Parameters:
|
|
376
376
|
- project_id: typing.Optional[str].
|
|
377
377
|
---
|
|
378
|
-
from platform.client import
|
|
378
|
+
from platform.client import AsyncLlamaCloud
|
|
379
379
|
|
|
380
|
-
client =
|
|
380
|
+
client = AsyncLlamaCloud(
|
|
381
381
|
token="YOUR_TOKEN",
|
|
382
382
|
base_url="https://yourhost.com/path/to/api",
|
|
383
383
|
)
|
|
@@ -454,9 +454,9 @@ class AsyncFilesClient:
|
|
|
454
454
|
|
|
455
455
|
- data_source_id: typing.Optional[str]. The ID of the data source that the file belongs to
|
|
456
456
|
---
|
|
457
|
-
from platform.client import
|
|
457
|
+
from platform.client import AsyncLlamaCloud
|
|
458
458
|
|
|
459
|
-
client =
|
|
459
|
+
client = AsyncLlamaCloud(
|
|
460
460
|
token="YOUR_TOKEN",
|
|
461
461
|
base_url="https://yourhost.com/path/to/api",
|
|
462
462
|
)
|
|
@@ -498,9 +498,9 @@ class AsyncFilesClient:
|
|
|
498
498
|
Parameters:
|
|
499
499
|
- project_id: typing.Optional[str].
|
|
500
500
|
---
|
|
501
|
-
from platform.client import
|
|
501
|
+
from platform.client import AsyncLlamaCloud
|
|
502
502
|
|
|
503
|
-
client =
|
|
503
|
+
client = AsyncLlamaCloud(
|
|
504
504
|
token="YOUR_TOKEN",
|
|
505
505
|
base_url="https://yourhost.com/path/to/api",
|
|
506
506
|
)
|
|
@@ -532,9 +532,9 @@ class AsyncFilesClient:
|
|
|
532
532
|
|
|
533
533
|
- project_id: typing.Optional[str].
|
|
534
534
|
---
|
|
535
|
-
from platform.client import
|
|
535
|
+
from platform.client import AsyncLlamaCloud
|
|
536
536
|
|
|
537
|
-
client =
|
|
537
|
+
client = AsyncLlamaCloud(
|
|
538
538
|
token="YOUR_TOKEN",
|
|
539
539
|
base_url="https://yourhost.com/path/to/api",
|
|
540
540
|
)
|