llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: the registry notes that this version of llama-cloud might be problematic.

Files changed (105)
  1. llama_cloud/__init__.py +12 -10
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/__init__.py +2 -1
  4. llama_cloud/resources/data_sinks/client.py +14 -14
  5. llama_cloud/resources/data_sources/client.py +16 -16
  6. llama_cloud/resources/embedding_model_configs/client.py +80 -24
  7. llama_cloud/resources/evals/client.py +36 -26
  8. llama_cloud/resources/extraction/client.py +32 -32
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +53 -28
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +60 -56
  14. llama_cloud/resources/parsing/client.py +555 -324
  15. llama_cloud/resources/pipelines/client.py +446 -302
  16. llama_cloud/resources/projects/client.py +270 -136
  17. llama_cloud/types/__init__.py +10 -10
  18. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  19. llama_cloud/types/base_prompt_template.py +6 -2
  20. llama_cloud/types/bedrock_embedding.py +12 -6
  21. llama_cloud/types/character_splitter.py +4 -2
  22. llama_cloud/types/chat_message.py +1 -1
  23. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  24. llama_cloud/types/cloud_box_data_source.py +13 -6
  25. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  26. llama_cloud/types/cloud_document.py +3 -1
  27. llama_cloud/types/cloud_document_create.py +3 -1
  28. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  29. llama_cloud/types/cloud_jira_data_source.py +7 -4
  30. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  31. llama_cloud/types/cloud_one_drive_data_source.py +6 -3
  32. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  33. llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
  34. llama_cloud/types/cloud_slack_data_source.py +7 -6
  35. llama_cloud/types/code_splitter.py +1 -1
  36. llama_cloud/types/cohere_embedding.py +7 -3
  37. llama_cloud/types/data_sink.py +4 -4
  38. llama_cloud/types/data_sink_create.py +1 -1
  39. llama_cloud/types/data_source.py +7 -5
  40. llama_cloud/types/data_source_create.py +4 -2
  41. llama_cloud/types/embedding_model_config.py +2 -2
  42. llama_cloud/types/embedding_model_config_update.py +4 -2
  43. llama_cloud/types/eval_dataset.py +2 -2
  44. llama_cloud/types/eval_dataset_job_record.py +13 -7
  45. llama_cloud/types/eval_execution_params_override.py +6 -2
  46. llama_cloud/types/eval_question.py +2 -2
  47. llama_cloud/types/extraction_result.py +2 -2
  48. llama_cloud/types/extraction_schema.py +5 -3
  49. llama_cloud/types/file.py +15 -7
  50. llama_cloud/types/file_permission_info_value.py +5 -0
  51. llama_cloud/types/filter_operator.py +2 -2
  52. llama_cloud/types/gemini_embedding.py +10 -6
  53. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  54. llama_cloud/types/input_message.py +3 -1
  55. llama_cloud/types/job_name_mapping.py +4 -0
  56. llama_cloud/types/llama_parse_parameters.py +11 -0
  57. llama_cloud/types/llm.py +4 -2
  58. llama_cloud/types/llm_parameters.py +5 -2
  59. llama_cloud/types/local_eval.py +10 -8
  60. llama_cloud/types/local_eval_results.py +1 -1
  61. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  62. llama_cloud/types/markdown_element_node_parser.py +5 -3
  63. llama_cloud/types/markdown_node_parser.py +1 -1
  64. llama_cloud/types/metadata_filter.py +2 -2
  65. llama_cloud/types/metric_result.py +3 -3
  66. llama_cloud/types/node_parser.py +1 -1
  67. llama_cloud/types/open_ai_embedding.py +12 -6
  68. llama_cloud/types/organization.py +2 -2
  69. llama_cloud/types/page_splitter_node_parser.py +2 -2
  70. llama_cloud/types/parsing_job_structured_result.py +32 -0
  71. llama_cloud/types/permission.py +3 -3
  72. llama_cloud/types/pipeline.py +17 -7
  73. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  74. llama_cloud/types/pipeline_create.py +15 -5
  75. llama_cloud/types/pipeline_data_source.py +13 -7
  76. llama_cloud/types/pipeline_data_source_create.py +3 -1
  77. llama_cloud/types/pipeline_deployment.py +4 -4
  78. llama_cloud/types/pipeline_file.py +25 -11
  79. llama_cloud/types/pipeline_file_create.py +3 -1
  80. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  81. llama_cloud/types/playground_session.py +2 -2
  82. llama_cloud/types/preset_retrieval_params.py +14 -7
  83. llama_cloud/types/presigned_url.py +3 -1
  84. llama_cloud/types/project.py +2 -2
  85. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  86. llama_cloud/types/prompt_spec.py +4 -2
  87. llama_cloud/types/role.py +3 -3
  88. llama_cloud/types/sentence_splitter.py +4 -2
  89. llama_cloud/types/text_node.py +3 -3
  90. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  91. llama_cloud/types/token_text_splitter.py +1 -1
  92. llama_cloud/types/user_organization.py +9 -5
  93. llama_cloud/types/user_organization_create.py +4 -4
  94. llama_cloud/types/user_organization_delete.py +2 -2
  95. llama_cloud/types/user_organization_role.py +2 -2
  96. llama_cloud/types/value.py +5 -0
  97. llama_cloud/types/vertex_text_embedding.py +9 -5
  98. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
  99. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
  100. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
  101. llama_cloud/types/data_sink_component.py +0 -20
  102. llama_cloud/types/data_source_component.py +0 -28
  103. llama_cloud/types/metadata_filter_value.py +0 -5
  104. llama_cloud/types/pipeline_data_source_component.py +0 -28
  105. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
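
Before reading the hunks, a quick way to confirm locally which of the two versions is installed. This is a minimal standard-library sketch; the distribution name is taken from the wheel filenames above.

import importlib.metadata

# "llama-cloud" is the distribution name from the wheel filenames in the header;
# after upgrading, this should print "0.1.6".
print(importlib.metadata.version("llama-cloud"))
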
llama_cloud/resources/evals/client.py

@@ -43,7 +43,7 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.get_dataset(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -77,8 +77,8 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.update_dataset(
- dataset_id="string",
- name="string",
+ dataset_id="dataset_id",
+ name="name",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -111,7 +111,7 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.delete_dataset(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -143,7 +143,7 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.list_questions(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -180,9 +180,9 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.create_question(
- dataset_id="string",
+ dataset_id="dataset_id",
  request=EvalQuestionCreate(
- content="string",
+ content="content",
  ),
  )
  """
@@ -216,14 +216,19 @@ class EvalsClient:

  - request: typing.List[EvalQuestionCreate].
  ---
+ from llama_cloud import EvalQuestionCreate
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
  client.evals.create_questions(
- dataset_id="string",
- request=[],
+ dataset_id="dataset_id",
+ request=[
+ EvalQuestionCreate(
+ content="content",
+ )
+ ],
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -258,7 +263,7 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.get_question(
- question_id="string",
+ question_id="question_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -293,9 +298,9 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.replace_question(
- question_id="string",
+ question_id="question_id",
  request=EvalQuestionCreate(
- content="string",
+ content="content",
  ),
  )
  """
@@ -329,7 +334,7 @@ class EvalsClient:
  token="YOUR_TOKEN",
  )
  client.evals.delete_question(
- question_id="string",
+ question_id="question_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -394,7 +399,7 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.get_dataset(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -428,8 +433,8 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.update_dataset(
- dataset_id="string",
- name="string",
+ dataset_id="dataset_id",
+ name="name",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -462,7 +467,7 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.delete_dataset(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -494,7 +499,7 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.list_questions(
- dataset_id="string",
+ dataset_id="dataset_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -531,9 +536,9 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.create_question(
- dataset_id="string",
+ dataset_id="dataset_id",
  request=EvalQuestionCreate(
- content="string",
+ content="content",
  ),
  )
  """
@@ -567,14 +572,19 @@ class AsyncEvalsClient:

  - request: typing.List[EvalQuestionCreate].
  ---
+ from llama_cloud import EvalQuestionCreate
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
  await client.evals.create_questions(
- dataset_id="string",
- request=[],
+ dataset_id="dataset_id",
+ request=[
+ EvalQuestionCreate(
+ content="content",
+ )
+ ],
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -609,7 +619,7 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.get_question(
- question_id="string",
+ question_id="question_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -644,9 +654,9 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.replace_question(
- question_id="string",
+ question_id="question_id",
  request=EvalQuestionCreate(
- content="string",
+ content="content",
  ),
  )
  """
@@ -680,7 +690,7 @@ class AsyncEvalsClient:
  token="YOUR_TOKEN",
  )
  await client.evals.delete_question(
- question_id="string",
+ question_id="question_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
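
The regenerated docstrings above replace the generic "string" placeholders with parameter-named placeholders and populate the previously empty request list. Pulled together, the updated synchronous example reads as below; the token, dataset ID, and question content are placeholders to substitute with real values.

from llama_cloud import EvalQuestionCreate
from llama_cloud.client import LlamaCloud

# Placeholder credentials and IDs, exactly as in the generated docstring examples.
client = LlamaCloud(
    token="YOUR_TOKEN",
)
client.evals.create_questions(
    dataset_id="dataset_id",
    request=[
        EvalQuestionCreate(
            content="content",
        )
    ],
)
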
llama_cloud/resources/extraction/client.py

@@ -72,7 +72,7 @@ class ExtractionClient:
  Parameters:
  - name: str. The name of the extraction schema

- - project_id: typing.Optional[str].
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

  - data_schema: typing.Dict[str, typing.Optional[ExtractionSchemaCreateDataSchemaValue]]. The schema of the data
  ---
@@ -82,7 +82,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.create_schema(
- name="string",
+ name="name",
  data_schema={},
  )
  """
@@ -117,11 +117,11 @@ class ExtractionClient:
  ) -> ExtractionSchema:
  """
  Parameters:
- - schema_id: typing.Optional[str].
+ - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema

  - name: str. The name of the extraction schema

- - project_id: typing.Optional[str].
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

  - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

@@ -133,8 +133,8 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.infer_schema(
- name="string",
- file_ids=[],
+ name="name",
+ file_ids=["file_ids"],
  )
  """
  _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
@@ -172,7 +172,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.get_schema(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -201,7 +201,7 @@ class ExtractionClient:
  Parameters:
  - schema_id: str.

- - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
+ - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]]. The schema of the data
  ---
  from llama_cloud.client import LlamaCloud

@@ -209,7 +209,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.update_schema(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
@@ -243,7 +243,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.list_jobs(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -276,8 +276,8 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.run_job(
- schema_id="string",
- file_id="string",
+ schema_id="schema_id",
+ file_id="file_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -308,7 +308,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.get_job(
- job_id="string",
+ job_id="job_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -340,8 +340,8 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.run_jobs_in_batch(
- schema_id="string",
- file_ids=[],
+ schema_id="schema_id",
+ file_ids=["file_ids"],
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -372,7 +372,7 @@ class ExtractionClient:
  token="YOUR_TOKEN",
  )
  client.extraction.get_job_result(
- job_id="string",
+ job_id="job_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
@@ -436,7 +436,7 @@ class AsyncExtractionClient:
  Parameters:
  - name: str. The name of the extraction schema

- - project_id: typing.Optional[str].
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

  - data_schema: typing.Dict[str, typing.Optional[ExtractionSchemaCreateDataSchemaValue]]. The schema of the data
  ---
@@ -446,7 +446,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.create_schema(
- name="string",
+ name="name",
  data_schema={},
  )
  """
@@ -481,11 +481,11 @@ class AsyncExtractionClient:
  ) -> ExtractionSchema:
  """
  Parameters:
- - schema_id: typing.Optional[str].
+ - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema

  - name: str. The name of the extraction schema

- - project_id: typing.Optional[str].
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

  - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

@@ -497,8 +497,8 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.infer_schema(
- name="string",
- file_ids=[],
+ name="name",
+ file_ids=["file_ids"],
  )
  """
  _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
@@ -536,7 +536,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.get_schema(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -565,7 +565,7 @@ class AsyncExtractionClient:
  Parameters:
  - schema_id: str.

- - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
+ - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]]. The schema of the data
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -573,7 +573,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.update_schema(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
@@ -607,7 +607,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.list_jobs(
- schema_id="string",
+ schema_id="schema_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -640,8 +640,8 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.run_job(
- schema_id="string",
- file_id="string",
+ schema_id="schema_id",
+ file_id="file_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -672,7 +672,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.get_job(
- job_id="string",
+ job_id="job_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -704,8 +704,8 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.run_jobs_in_batch(
- schema_id="string",
- file_ids=[],
+ schema_id="schema_id",
+ file_ids=["file_ids"],
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
@@ -736,7 +736,7 @@ class AsyncExtractionClient:
  token="YOUR_TOKEN",
  )
  await client.extraction.get_job_result(
- job_id="string",
+ job_id="job_id",
  )
  """
  _response = await self._client_wrapper.httpx_client.request(
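
Chaining the extraction docstring examples above gives a rough end-to-end sketch: infer a schema from files, run a job, then fetch the job and its result. The calls and parameter names come from the hunks above; the string values are placeholders, and real code would pass the IDs returned by the earlier calls rather than literals.

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)

# Infer an extraction schema from one or more uploaded files.
client.extraction.infer_schema(
    name="name",
    file_ids=["file_ids"],
)

# Run a single extraction job for a file against that schema,
# then check the job and retrieve its result once it has finished.
client.extraction.run_job(
    schema_id="schema_id",
    file_id="file_id",
)
client.extraction.get_job(
    job_id="job_id",
)
client.extraction.get_job_result(
    job_id="job_id",
)
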
llama_cloud/resources/files/__init__.py

@@ -1,5 +1,5 @@
  # This file was auto-generated by Fern from our API Definition.

- from .types import FileCreateFromUrlResourceInfoValue, FileCreateResourceInfoValue
+ from .types import FileCreateFromUrlResourceInfoValue, FileCreatePermissionInfoValue, FileCreateResourceInfoValue

- __all__ = ["FileCreateFromUrlResourceInfoValue", "FileCreateResourceInfoValue"]
+ __all__ = ["FileCreateFromUrlResourceInfoValue", "FileCreatePermissionInfoValue", "FileCreateResourceInfoValue"]
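
Given the re-export above, the new name should be importable directly from the files resources package; a minimal check, with the import path inferred from the __all__ change rather than tested here.

# Import path inferred from the hunk above; verify against the installed package.
from llama_cloud.resources.files import FileCreatePermissionInfoValue

print(FileCreatePermissionInfoValue)
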