llama-cloud 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (102)
  1. llama_cloud/client.py +2 -2
  2. llama_cloud/core/jsonable_encoder.py +3 -0
  3. llama_cloud/resources/api_keys/client.py +19 -16
  4. llama_cloud/resources/billing/client.py +15 -12
  5. llama_cloud/resources/component_definitions/client.py +15 -12
  6. llama_cloud/resources/data_sinks/client.py +33 -30
  7. llama_cloud/resources/data_sources/client.py +33 -30
  8. llama_cloud/resources/deprecated/client.py +51 -48
  9. llama_cloud/resources/evals/client.py +47 -44
  10. llama_cloud/resources/files/client.py +27 -24
  11. llama_cloud/resources/parsing/client.py +51 -48
  12. llama_cloud/resources/pipelines/client.py +238 -164
  13. llama_cloud/resources/projects/client.py +75 -72
  14. llama_cloud/types/api_key.py +3 -0
  15. llama_cloud/types/azure_open_ai_embedding.py +3 -0
  16. llama_cloud/types/base.py +3 -0
  17. llama_cloud/types/base_prompt_template.py +3 -0
  18. llama_cloud/types/bedrock_embedding.py +3 -0
  19. llama_cloud/types/chat_message.py +3 -0
  20. llama_cloud/types/cloud_az_storage_blob_data_source.py +3 -0
  21. llama_cloud/types/cloud_chroma_vector_store.py +3 -0
  22. llama_cloud/types/cloud_document.py +3 -0
  23. llama_cloud/types/cloud_document_create.py +3 -0
  24. llama_cloud/types/cloud_gcs_data_source.py +3 -0
  25. llama_cloud/types/cloud_google_drive_data_source.py +3 -0
  26. llama_cloud/types/cloud_one_drive_data_source.py +3 -0
  27. llama_cloud/types/cloud_pinecone_vector_store.py +3 -0
  28. llama_cloud/types/cloud_postgres_vector_store.py +3 -0
  29. llama_cloud/types/cloud_qdrant_vector_store.py +3 -0
  30. llama_cloud/types/cloud_s_3_data_source.py +3 -0
  31. llama_cloud/types/cloud_sharepoint_data_source.py +3 -0
  32. llama_cloud/types/cloud_weaviate_vector_store.py +3 -0
  33. llama_cloud/types/code_splitter.py +3 -0
  34. llama_cloud/types/cohere_embedding.py +3 -0
  35. llama_cloud/types/configurable_transformation_definition.py +3 -0
  36. llama_cloud/types/configured_transformation_item.py +3 -0
  37. llama_cloud/types/data_sink.py +3 -0
  38. llama_cloud/types/data_sink_create.py +3 -0
  39. llama_cloud/types/data_sink_definition.py +3 -0
  40. llama_cloud/types/data_source.py +3 -0
  41. llama_cloud/types/data_source_create.py +3 -0
  42. llama_cloud/types/data_source_definition.py +3 -0
  43. llama_cloud/types/eval_dataset.py +3 -0
  44. llama_cloud/types/eval_dataset_job_params.py +3 -0
  45. llama_cloud/types/eval_dataset_job_record.py +3 -0
  46. llama_cloud/types/eval_execution_params.py +3 -0
  47. llama_cloud/types/eval_execution_params_override.py +3 -0
  48. llama_cloud/types/eval_llm_model_data.py +3 -0
  49. llama_cloud/types/eval_question.py +3 -0
  50. llama_cloud/types/eval_question_create.py +3 -0
  51. llama_cloud/types/eval_question_result.py +3 -0
  52. llama_cloud/types/file.py +3 -0
  53. llama_cloud/types/gemini_embedding.py +3 -0
  54. llama_cloud/types/html_node_parser.py +3 -0
  55. llama_cloud/types/http_validation_error.py +3 -0
  56. llama_cloud/types/hugging_face_inference_api_embedding.py +3 -0
  57. llama_cloud/types/json_node_parser.py +3 -0
  58. llama_cloud/types/llm.py +3 -0
  59. llama_cloud/types/local_eval.py +3 -0
  60. llama_cloud/types/local_eval_results.py +3 -0
  61. llama_cloud/types/local_eval_sets.py +3 -0
  62. llama_cloud/types/markdown_element_node_parser.py +3 -0
  63. llama_cloud/types/markdown_node_parser.py +3 -0
  64. llama_cloud/types/metadata_filter.py +3 -0
  65. llama_cloud/types/metadata_filters.py +3 -0
  66. llama_cloud/types/metric_result.py +3 -0
  67. llama_cloud/types/node_parser.py +3 -0
  68. llama_cloud/types/open_ai_embedding.py +3 -0
  69. llama_cloud/types/parsing_history_item.py +3 -0
  70. llama_cloud/types/parsing_job.py +3 -0
  71. llama_cloud/types/parsing_job_json_result.py +3 -0
  72. llama_cloud/types/parsing_job_markdown_result.py +3 -0
  73. llama_cloud/types/parsing_job_text_result.py +3 -0
  74. llama_cloud/types/parsing_usage.py +3 -0
  75. llama_cloud/types/pipeline.py +3 -0
  76. llama_cloud/types/pipeline_create.py +3 -0
  77. llama_cloud/types/pipeline_data_source.py +3 -0
  78. llama_cloud/types/pipeline_data_source_create.py +3 -0
  79. llama_cloud/types/pipeline_deployment.py +3 -0
  80. llama_cloud/types/pipeline_file.py +3 -0
  81. llama_cloud/types/pipeline_file_create.py +3 -0
  82. llama_cloud/types/pipeline_file_status_response.py +3 -0
  83. llama_cloud/types/preset_retrieval_params.py +3 -0
  84. llama_cloud/types/presigned_url.py +3 -0
  85. llama_cloud/types/project.py +3 -0
  86. llama_cloud/types/project_create.py +3 -0
  87. llama_cloud/types/prompt_mixin_prompts.py +3 -0
  88. llama_cloud/types/prompt_spec.py +3 -0
  89. llama_cloud/types/related_node_info.py +3 -0
  90. llama_cloud/types/retrieve_results.py +3 -0
  91. llama_cloud/types/sentence_splitter.py +3 -0
  92. llama_cloud/types/simple_file_node_parser.py +3 -0
  93. llama_cloud/types/supported_eval_llm_model.py +3 -0
  94. llama_cloud/types/text_node.py +3 -0
  95. llama_cloud/types/text_node_with_score.py +3 -0
  96. llama_cloud/types/token_text_splitter.py +3 -0
  97. llama_cloud/types/validation_error.py +3 -0
  98. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/METADATA +1 -1
  99. llama_cloud-0.0.3.dist-info/RECORD +173 -0
  100. llama_cloud-0.0.1.dist-info/RECORD +0 -173
  101. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/LICENSE +0 -0
  102. {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.3.dist-info}/WHEEL +0 -0
@@ -20,6 +20,9 @@ from ...types.parsing_usage import ParsingUsage
  from ...types.presigned_url import PresignedUrl

  try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
      import pydantic.v1 as pydantic # type: ignore
  except ImportError:
      import pydantic # type: ignore
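Taken together, the added lines make the version check explicit: on pydantic 1.x an ImportError is raised on purpose so the except branch falls back to the plain import, while on pydantic 2.x the v1 compatibility layer (pydantic.v1) is aliased as before. A sketch of how the block reads after this change, reconstructed from the hunk above:

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        # pydantic 1.x has no pydantic.v1 namespace; jump to the fallback below
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore  # pydantic 2.x: alias the v1 compat layer
except ImportError:
    import pydantic  # type: ignore  # pydantic 1.x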
@@ -41,9 +44,9 @@ class ParsingClient:

  - name: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
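For reference, a minimal usage sketch assembled from the updated docstrings; the constructor call is exactly what the docstrings show, while the parsing method name below is illustrative and may not match the generated client:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
    base_url="https://yourhost.com/path/to/api",
)
# Hypothetical call shown only to illustrate the renamed client; check the
# generated ParsingClient for the actual method names.
extensions = client.parsing.get_supported_file_extensions()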
@@ -75,9 +78,9 @@ class ParsingClient:
  Get a list of supported file extensions

  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -174,9 +177,9 @@ class ParsingClient:
  Get parsing usage for user

  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -205,9 +208,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -238,9 +241,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -271,9 +274,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -306,9 +309,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -341,9 +344,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -376,9 +379,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -409,9 +412,9 @@ class ParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -442,9 +445,9 @@ class ParsingClient:
  Get parsing history for user

  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -475,9 +478,9 @@ class ParsingClient:

  - filename: str.
  ---
- from platform.client import PlatformApi
+ from llama_cloud.client import LlamaCloud

- client = PlatformApi(
+ client = LlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -518,9 +521,9 @@ class AsyncParsingClient:

  - name: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
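The async client is renamed the same way. A minimal sketch, assuming the usual asyncio entry point; the awaited parsing call is illustrative and not confirmed by this diff:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )
    # Hypothetical call; the real AsyncParsingClient method name may differ.
    usage = await client.parsing.get_parsing_usage()
    print(usage)


asyncio.run(main())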
@@ -552,9 +555,9 @@ class AsyncParsingClient:
  Get a list of supported file extensions

  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -651,9 +654,9 @@ class AsyncParsingClient:
  Get parsing usage for user

  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -682,9 +685,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -715,9 +718,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -748,9 +751,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -783,9 +786,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -818,9 +821,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -853,9 +856,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -886,9 +889,9 @@ class AsyncParsingClient:
  Parameters:
  - job_id: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -919,9 +922,9 @@ class AsyncParsingClient:
  Get parsing history for user

  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )
@@ -952,9 +955,9 @@ class AsyncParsingClient:

  - filename: str.
  ---
- from platform.client import AsyncPlatformApi
+ from llama_cloud.client import AsyncLlamaCloud

- client = AsyncPlatformApi(
+ client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  base_url="https://yourhost.com/path/to/api",
  )