llama-cloud 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff compares publicly available package versions as they were released to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.

Files changed (102)
  1. llama_cloud/client.py +2 -2
  2. llama_cloud/core/jsonable_encoder.py +3 -0
  3. llama_cloud/resources/api_keys/client.py +19 -16
  4. llama_cloud/resources/billing/client.py +15 -12
  5. llama_cloud/resources/component_definitions/client.py +15 -12
  6. llama_cloud/resources/data_sinks/client.py +33 -30
  7. llama_cloud/resources/data_sources/client.py +33 -30
  8. llama_cloud/resources/deprecated/client.py +51 -48
  9. llama_cloud/resources/evals/client.py +47 -44
  10. llama_cloud/resources/files/client.py +27 -24
  11. llama_cloud/resources/parsing/client.py +51 -48
  12. llama_cloud/resources/pipelines/client.py +238 -164
  13. llama_cloud/resources/projects/client.py +75 -72
  14. llama_cloud/types/api_key.py +3 -0
  15. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  16. llama_cloud/types/base.py +3 -0
  17. llama_cloud/types/base_prompt_template.py +3 -0
  18. llama_cloud/types/bedrock_embedding.py +3 -0
  19. llama_cloud/types/chat_message.py +3 -0
  20. llama_cloud/types/cloud_az_storage_blob_data_source.py +3 -0
  21. llama_cloud/types/cloud_chroma_vector_store.py +3 -0
  22. llama_cloud/types/cloud_document.py +3 -0
  23. llama_cloud/types/cloud_document_create.py +3 -0
  24. llama_cloud/types/cloud_gcs_data_source.py +3 -0
  25. llama_cloud/types/cloud_google_drive_data_source.py +3 -0
  26. llama_cloud/types/cloud_one_drive_data_source.py +3 -0
  27. llama_cloud/types/cloud_pinecone_vector_store.py +3 -0
  28. llama_cloud/types/cloud_postgres_vector_store.py +3 -0
  29. llama_cloud/types/cloud_qdrant_vector_store.py +3 -0
  30. llama_cloud/types/cloud_s_3_data_source.py +3 -0
  31. llama_cloud/types/cloud_sharepoint_data_source.py +3 -0
  32. llama_cloud/types/cloud_weaviate_vector_store.py +3 -0
  33. llama_cloud/types/code_splitter.py +3 -0
  34. llama_cloud/types/cohere_embedding.py +3 -0
  35. llama_cloud/types/configurable_transformation_definition.py +3 -0
  36. llama_cloud/types/configured_transformation_item.py +3 -0
  37. llama_cloud/types/data_sink.py +3 -0
  38. llama_cloud/types/data_sink_create.py +3 -0
  39. llama_cloud/types/data_sink_definition.py +3 -0
  40. llama_cloud/types/data_source.py +3 -0
  41. llama_cloud/types/data_source_create.py +3 -0
  42. llama_cloud/types/data_source_definition.py +3 -0
  43. llama_cloud/types/eval_dataset.py +3 -0
  44. llama_cloud/types/eval_dataset_job_params.py +3 -0
  45. llama_cloud/types/eval_dataset_job_record.py +3 -0
  46. llama_cloud/types/eval_execution_params.py +3 -0
  47. llama_cloud/types/eval_execution_params_override.py +3 -0
  48. llama_cloud/types/eval_llm_model_data.py +3 -0
  49. llama_cloud/types/eval_question.py +3 -0
  50. llama_cloud/types/eval_question_create.py +3 -0
  51. llama_cloud/types/eval_question_result.py +3 -0
  52. llama_cloud/types/file.py +3 -0
  53. llama_cloud/types/gemini_embedding.py +3 -0
  54. llama_cloud/types/html_node_parser.py +3 -0
  55. llama_cloud/types/http_validation_error.py +3 -0
  56. llama_cloud/types/hugging_face_inference_api_embedding.py +3 -0
  57. llama_cloud/types/json_node_parser.py +3 -0
  58. llama_cloud/types/llm.py +3 -0
  59. llama_cloud/types/local_eval.py +3 -0
  60. llama_cloud/types/local_eval_results.py +3 -0
  61. llama_cloud/types/local_eval_sets.py +3 -0
  62. llama_cloud/types/markdown_element_node_parser.py +3 -0
  63. llama_cloud/types/markdown_node_parser.py +3 -0
  64. llama_cloud/types/metadata_filter.py +3 -0
  65. llama_cloud/types/metadata_filters.py +3 -0
  66. llama_cloud/types/metric_result.py +3 -0
  67. llama_cloud/types/node_parser.py +3 -0
  68. llama_cloud/types/open_ai_embedding.py +3 -0
  69. llama_cloud/types/parsing_history_item.py +3 -0
  70. llama_cloud/types/parsing_job.py +3 -0
  71. llama_cloud/types/parsing_job_json_result.py +3 -0
  72. llama_cloud/types/parsing_job_markdown_result.py +3 -0
  73. llama_cloud/types/parsing_job_text_result.py +3 -0
  74. llama_cloud/types/parsing_usage.py +3 -0
  75. llama_cloud/types/pipeline.py +3 -0
  76. llama_cloud/types/pipeline_create.py +3 -0
  77. llama_cloud/types/pipeline_data_source.py +3 -0
  78. llama_cloud/types/pipeline_data_source_create.py +3 -0
  79. llama_cloud/types/pipeline_deployment.py +3 -0
  80. llama_cloud/types/pipeline_file.py +3 -0
  81. llama_cloud/types/pipeline_file_create.py +3 -0
  82. llama_cloud/types/pipeline_file_status_response.py +3 -0
  83. llama_cloud/types/preset_retrieval_params.py +3 -0
  84. llama_cloud/types/presigned_url.py +3 -0
  85. llama_cloud/types/project.py +3 -0
  86. llama_cloud/types/project_create.py +3 -0
  87. llama_cloud/types/prompt_mixin_prompts.py +3 -0
  88. llama_cloud/types/prompt_spec.py +3 -0
  89. llama_cloud/types/related_node_info.py +3 -0
  90. llama_cloud/types/retrieve_results.py +3 -0
  91. llama_cloud/types/sentence_splitter.py +3 -0
  92. llama_cloud/types/simple_file_node_parser.py +3 -0
  93. llama_cloud/types/supported_eval_llm_model.py +3 -0
  94. llama_cloud/types/text_node.py +3 -0
  95. llama_cloud/types/text_node_with_score.py +3 -0
  96. llama_cloud/types/token_text_splitter.py +3 -0
  97. llama_cloud/types/validation_error.py +3 -0
  98. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/METADATA +1 -1
  99. llama_cloud-0.0.3.dist-info/RECORD +173 -0
  100. llama_cloud-0.0.1.dist-info/RECORD +0 -173
  101. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/LICENSE +0 -0
  102. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/WHEEL +0 -0
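Two changes recur throughout the hunks below. First, each resource client gains a three-line pydantic version guard at the top of its import shim (an annotated sketch follows the first hunk); the uniform +3/-0 counts suggest the same guard accounts for the changes in the type modules, though those hunks are not shown here. Second, the docstring usage examples are renamed from the old platform package and its PlatformApi/AsyncPlatformApi clients to the published llama_cloud package and its LlamaCloud/AsyncLlamaCloud clients, as in this example taken from the updated docstrings (token and base_url are the placeholder values used there):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )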
llama_cloud/resources/deprecated/client.py

@@ -20,6 +20,9 @@ from ...types.parsing_usage import ParsingUsage
 from ...types.presigned_url import PresignedUrl
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore
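For readability, here is the same import shim as a standalone snippet; the comments are added here to describe what each branch does and are not part of the package source:

    try:
        import pydantic
        # pydantic 1.x installed: fall through to the except branch and use
        # the top-level pydantic module directly.
        if pydantic.__version__.startswith("1."):
            raise ImportError
        # pydantic 2.x installed: use the bundled v1 compatibility namespace.
        import pydantic.v1 as pydantic  # type: ignore
    except ImportError:
        import pydantic  # type: ignore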
@@ -41,9 +44,9 @@ class DeprecatedClient:
 
     - name: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -75,9 +78,9 @@ class DeprecatedClient:
 Get a list of supported file extensions
 
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -174,9 +177,9 @@ class DeprecatedClient:
 Get parsing usage for user
 
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -205,9 +208,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -238,9 +241,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -271,9 +274,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -306,9 +309,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -341,9 +344,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -376,9 +379,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -409,9 +412,9 @@ class DeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -442,9 +445,9 @@ class DeprecatedClient:
 Get parsing history for user
 
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -475,9 +478,9 @@ class DeprecatedClient:
 
     - filename: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -518,9 +521,9 @@ class AsyncDeprecatedClient:
 
     - name: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -552,9 +555,9 @@ class AsyncDeprecatedClient:
 Get a list of supported file extensions
 
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -651,9 +654,9 @@ class AsyncDeprecatedClient:
 Get parsing usage for user
 
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -682,9 +685,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -715,9 +718,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -748,9 +751,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -783,9 +786,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -818,9 +821,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -853,9 +856,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -886,9 +889,9 @@ class AsyncDeprecatedClient:
 Parameters:
     - job_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -919,9 +922,9 @@ class AsyncDeprecatedClient:
 Get parsing history for user
 
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -952,9 +955,9 @@ class AsyncDeprecatedClient:
 
     - filename: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )
llama_cloud/resources/evals/client.py

@@ -15,6 +15,9 @@ from ...types.http_validation_error import HttpValidationError
 from ...types.supported_eval_llm_model import SupportedEvalLlmModel
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore

@@ -34,9 +37,9 @@ class EvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -69,9 +72,9 @@ class EvalsClient:
 
     - name: str. The name of the EvalDataset.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -104,9 +107,9 @@ class EvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -137,9 +140,9 @@ class EvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -174,10 +177,10 @@ class EvalsClient:
 
     - request: EvalQuestionCreate.
 ---
-from platform import EvalQuestionCreate
-from platform.client import PlatformApi
+from llama_cloud import EvalQuestionCreate
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -218,9 +221,9 @@ class EvalsClient:
 
     - request: typing.List[EvalQuestionCreate].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -255,9 +258,9 @@ class EvalsClient:
 Parameters:
     - question_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -290,10 +293,10 @@ class EvalsClient:
 
     - request: EvalQuestionCreate.
 ---
-from platform import EvalQuestionCreate
-from platform.client import PlatformApi
+from llama_cloud import EvalQuestionCreate
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -328,9 +331,9 @@ class EvalsClient:
 Parameters:
     - question_id: str.
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -359,9 +362,9 @@ class EvalsClient:
 Get all supported models.
 
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -395,9 +398,9 @@ class AsyncEvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -430,9 +433,9 @@ class AsyncEvalsClient:
 
     - name: str. The name of the EvalDataset.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -465,9 +468,9 @@ class AsyncEvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -498,9 +501,9 @@ class AsyncEvalsClient:
 Parameters:
     - dataset_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -535,10 +538,10 @@ class AsyncEvalsClient:
 
     - request: EvalQuestionCreate.
 ---
-from platform import EvalQuestionCreate
-from platform.client import AsyncPlatformApi
+from llama_cloud import EvalQuestionCreate
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -579,9 +582,9 @@ class AsyncEvalsClient:
 
     - request: typing.List[EvalQuestionCreate].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -616,9 +619,9 @@ class AsyncEvalsClient:
 Parameters:
     - question_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -651,10 +654,10 @@ class AsyncEvalsClient:
 
     - request: EvalQuestionCreate.
 ---
-from platform import EvalQuestionCreate
-from platform.client import AsyncPlatformApi
+from llama_cloud import EvalQuestionCreate
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -689,9 +692,9 @@ class AsyncEvalsClient:
 Parameters:
     - question_id: str.
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -720,9 +723,9 @@ class AsyncEvalsClient:
 Get all supported models.
 
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )
llama_cloud/resources/files/client.py

@@ -16,6 +16,9 @@ from ...types.presigned_url import PresignedUrl
 from .types.file_create_resource_info_value import FileCreateResourceInfoValue
 
 try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
     import pydantic.v1 as pydantic  # type: ignore
 except ImportError:
     import pydantic  # type: ignore

@@ -37,9 +40,9 @@ class FilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -73,9 +76,9 @@ class FilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -107,9 +110,9 @@ class FilesClient:
 Parameters:
     - project_id: typing.Optional[str].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -186,9 +189,9 @@ class FilesClient:
 
     - data_source_id: typing.Optional[str]. The ID of the data source that the file belongs to
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -230,9 +233,9 @@ class FilesClient:
 Parameters:
     - project_id: typing.Optional[str].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -264,9 +267,9 @@ class FilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import PlatformApi
+from llama_cloud.client import LlamaCloud
 
-client = PlatformApi(
+client = LlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -305,9 +308,9 @@ class AsyncFilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -341,9 +344,9 @@ class AsyncFilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -375,9 +378,9 @@ class AsyncFilesClient:
 Parameters:
     - project_id: typing.Optional[str].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -454,9 +457,9 @@ class AsyncFilesClient:
 
     - data_source_id: typing.Optional[str]. The ID of the data source that the file belongs to
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -498,9 +501,9 @@ class AsyncFilesClient:
 Parameters:
     - project_id: typing.Optional[str].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )

@@ -532,9 +535,9 @@ class AsyncFilesClient:
 
     - project_id: typing.Optional[str].
 ---
-from platform.client import AsyncPlatformApi
+from llama_cloud.client import AsyncLlamaCloud
 
-client = AsyncPlatformApi(
+client = AsyncLlamaCloud(
     token="YOUR_TOKEN",
     base_url="https://yourhost.com/path/to/api",
 )