llama-cloud 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +140 -6
- llama_cloud/client.py +15 -0
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +15 -0
- llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
- llama_cloud/resources/chat_apps/client.py +630 -0
- llama_cloud/resources/data_sinks/client.py +12 -12
- llama_cloud/resources/data_sources/client.py +14 -14
- llama_cloud/resources/embedding_model_configs/client.py +20 -76
- llama_cloud/resources/evals/client.py +26 -36
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/client.py +40 -44
- llama_cloud/resources/jobs/__init__.py +2 -0
- llama_cloud/resources/jobs/client.py +148 -0
- llama_cloud/resources/llama_extract/__init__.py +5 -0
- llama_cloud/resources/llama_extract/client.py +1038 -0
- llama_cloud/resources/llama_extract/types/__init__.py +6 -0
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
- llama_cloud/resources/organizations/client.py +66 -70
- llama_cloud/resources/parsing/client.py +448 -428
- llama_cloud/resources/pipelines/client.py +256 -344
- llama_cloud/resources/projects/client.py +34 -60
- llama_cloud/resources/reports/__init__.py +5 -0
- llama_cloud/resources/reports/client.py +1198 -0
- llama_cloud/resources/reports/types/__init__.py +7 -0
- llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
- llama_cloud/resources/retrievers/__init__.py +2 -0
- llama_cloud/resources/retrievers/client.py +654 -0
- llama_cloud/types/__init__.py +128 -6
- llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
- llama_cloud/types/azure_open_ai_embedding.py +6 -12
- llama_cloud/types/base_prompt_template.py +2 -6
- llama_cloud/types/bedrock_embedding.py +6 -12
- llama_cloud/types/character_splitter.py +2 -4
- llama_cloud/types/chat_app.py +44 -0
- llama_cloud/types/chat_app_response.py +41 -0
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
- llama_cloud/types/cloud_box_data_source.py +6 -12
- llama_cloud/types/cloud_confluence_data_source.py +6 -6
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -2
- llama_cloud/types/cloud_one_drive_data_source.py +3 -5
- llama_cloud/types/cloud_postgres_vector_store.py +1 -0
- llama_cloud/types/cloud_s_3_data_source.py +4 -8
- llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
- llama_cloud/types/cloud_slack_data_source.py +6 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +3 -7
- llama_cloud/types/composite_retrieval_mode.py +21 -0
- llama_cloud/types/composite_retrieval_result.py +38 -0
- llama_cloud/types/composite_retrieved_text_node.py +42 -0
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_component.py +20 -0
- llama_cloud/types/data_source.py +5 -7
- llama_cloud/types/data_source_component.py +28 -0
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/edit_suggestion.py +39 -0
- llama_cloud/types/embedding_model_config.py +2 -2
- llama_cloud/types/embedding_model_config_update.py +2 -4
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +8 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extract_agent.py +45 -0
- llama_cloud/types/extract_agent_data_schema_value.py +5 -0
- llama_cloud/types/extract_config.py +40 -0
- llama_cloud/types/extract_job.py +35 -0
- llama_cloud/types/extract_job_create.py +40 -0
- llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
- llama_cloud/types/extract_mode.py +17 -0
- llama_cloud/types/extract_resultset.py +46 -0
- llama_cloud/types/extract_resultset_data.py +11 -0
- llama_cloud/types/extract_resultset_data_item_value.py +7 -0
- llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +9 -14
- llama_cloud/types/filter_condition.py +9 -1
- llama_cloud/types/filter_operator.py +6 -2
- llama_cloud/types/gemini_embedding.py +6 -10
- llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
- llama_cloud/types/image_block.py +35 -0
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/job_names.py +89 -0
- llama_cloud/types/job_record.py +57 -0
- llama_cloud/types/job_record_with_usage_metrics.py +36 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
- llama_cloud/types/llama_parse_parameters.py +4 -0
- llama_cloud/types/llm.py +3 -4
- llama_cloud/types/llm_model_data.py +1 -0
- llama_cloud/types/llm_parameters.py +3 -5
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status.py +4 -0
- llama_cloud/types/managed_ingestion_status_response.py +4 -5
- llama_cloud/types/markdown_element_node_parser.py +3 -5
- llama_cloud/types/markdown_node_parser.py +1 -1
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metadata_filter_value.py +5 -0
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/object_type.py +4 -0
- llama_cloud/types/open_ai_embedding.py +6 -12
- llama_cloud/types/organization.py +7 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
- llama_cloud/types/paginated_report_response.py +35 -0
- llama_cloud/types/parse_plan_level.py +21 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +8 -16
- llama_cloud/types/pipeline_data_source.py +7 -13
- llama_cloud/types/pipeline_data_source_component.py +28 -0
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +13 -24
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +4 -4
- llama_cloud/types/preset_retrieval_params.py +8 -14
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/progress_event.py +44 -0
- llama_cloud/types/progress_event_status.py +33 -0
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +3 -5
- llama_cloud/types/related_node_info.py +2 -2
- llama_cloud/types/related_node_info_node_type.py +7 -0
- llama_cloud/types/report.py +33 -0
- llama_cloud/types/report_block.py +34 -0
- llama_cloud/types/report_block_dependency.py +29 -0
- llama_cloud/types/report_create_response.py +31 -0
- llama_cloud/types/report_event_item.py +40 -0
- llama_cloud/types/report_event_item_event_data.py +45 -0
- llama_cloud/types/report_event_type.py +37 -0
- llama_cloud/types/report_metadata.py +43 -0
- llama_cloud/types/report_plan.py +36 -0
- llama_cloud/types/report_plan_block.py +36 -0
- llama_cloud/types/report_query.py +33 -0
- llama_cloud/types/report_response.py +41 -0
- llama_cloud/types/report_state.py +37 -0
- llama_cloud/types/report_state_event.py +38 -0
- llama_cloud/types/report_update_event.py +38 -0
- llama_cloud/types/retrieve_results.py +1 -1
- llama_cloud/types/retriever.py +45 -0
- llama_cloud/types/retriever_create.py +37 -0
- llama_cloud/types/retriever_pipeline.py +37 -0
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +2 -4
- llama_cloud/types/status_enum.py +4 -0
- llama_cloud/types/supported_llm_model_names.py +4 -0
- llama_cloud/types/text_block.py +31 -0
- llama_cloud/types/text_node.py +15 -8
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/usage_metric_response.py +34 -0
- llama_cloud/types/user_job_record.py +32 -0
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/vertex_text_embedding.py +5 -9
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/METADATA +2 -1
- llama_cloud-0.1.7.dist-info/RECORD +310 -0
- llama_cloud/types/value.py +0 -5
- llama_cloud-0.1.6.dist-info/RECORD +0 -241
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7.dist-info}/WHEEL +0 -0
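Most of this release is new surface area: chat_apps, jobs, llama_extract, reports, and retrievers resource modules, plus their supporting types. As a rough sketch of how that surface is reached, assuming the new resource clients hang off LlamaCloud the same way existing ones such as files and extraction do (only the jobs call below appears verbatim in this diff):

# Hypothetical tour of the 0.1.7 additions; the attribute names are inferred
# from the new resource directories listed above, not from documented examples.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)

# New jobs history endpoint (its generated client appears later in this diff):
jobs = client.jobs.get_jobs_api_v_1_jobs_get()

# Other resource clients added in this release (methods not shown in this excerpt):
# client.chat_apps, client.llama_extract, client.reports, client.retrievers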
llama_cloud/resources/extraction/client.py

@@ -72,7 +72,7 @@ class ExtractionClient:
         Parameters:
             - name: str. The name of the extraction schema

-            - project_id: typing.Optional[str].
+            - project_id: typing.Optional[str].

             - data_schema: typing.Dict[str, typing.Optional[ExtractionSchemaCreateDataSchemaValue]]. The schema of the data
         ---
@@ -82,7 +82,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.create_schema(
-            name="
+            name="string",
             data_schema={},
         )
         """
@@ -117,11 +117,11 @@ class ExtractionClient:
     ) -> ExtractionSchema:
         """
         Parameters:
-            - schema_id: typing.Optional[str].
+            - schema_id: typing.Optional[str].

             - name: str. The name of the extraction schema

-            - project_id: typing.Optional[str].
+            - project_id: typing.Optional[str].

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

@@ -133,8 +133,8 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.infer_schema(
-            name="
-            file_ids=[
+            name="string",
+            file_ids=[],
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
@@ -172,7 +172,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.get_schema(
-            schema_id="
+            schema_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -201,7 +201,7 @@ class ExtractionClient:
         Parameters:
             - schema_id: str.

-            - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
+            - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
         ---
         from llama_cloud.client import LlamaCloud

@@ -209,7 +209,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.update_schema(
-            schema_id="
+            schema_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -243,7 +243,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.list_jobs(
-            schema_id="
+            schema_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -276,8 +276,8 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.run_job(
-            schema_id="
-            file_id="
+            schema_id="string",
+            file_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -308,7 +308,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.get_job(
-            job_id="
+            job_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -340,8 +340,8 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.run_jobs_in_batch(
-            schema_id="
-            file_ids=[
+            schema_id="string",
+            file_ids=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -372,7 +372,7 @@ class ExtractionClient:
             token="YOUR_TOKEN",
         )
         client.extraction.get_job_result(
-            job_id="
+            job_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -436,7 +436,7 @@ class AsyncExtractionClient:
         Parameters:
             - name: str. The name of the extraction schema

-            - project_id: typing.Optional[str].
+            - project_id: typing.Optional[str].

             - data_schema: typing.Dict[str, typing.Optional[ExtractionSchemaCreateDataSchemaValue]]. The schema of the data
         ---
@@ -446,7 +446,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.create_schema(
-            name="
+            name="string",
             data_schema={},
         )
         """
@@ -481,11 +481,11 @@ class AsyncExtractionClient:
     ) -> ExtractionSchema:
         """
         Parameters:
-            - schema_id: typing.Optional[str].
+            - schema_id: typing.Optional[str].

             - name: str. The name of the extraction schema

-            - project_id: typing.Optional[str].
+            - project_id: typing.Optional[str].

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

@@ -497,8 +497,8 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.infer_schema(
-            name="
-            file_ids=[
+            name="string",
+            file_ids=[],
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
@@ -536,7 +536,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.get_schema(
-            schema_id="
+            schema_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -565,7 +565,7 @@ class AsyncExtractionClient:
         Parameters:
             - schema_id: str.

-            - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
+            - data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaUpdateDataSchemaValue]]].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -573,7 +573,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.update_schema(
-            schema_id="
+            schema_id="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -607,7 +607,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.list_jobs(
-            schema_id="
+            schema_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -640,8 +640,8 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.run_job(
-            schema_id="
-            file_id="
+            schema_id="string",
+            file_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -672,7 +672,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.get_job(
-            job_id="
+            job_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -704,8 +704,8 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
         )
         await client.extraction.run_jobs_in_batch(
-            schema_id="
-            file_ids=[
+            schema_id="string",
+            file_ids=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -736,7 +736,7 @@ class AsyncExtractionClient:
             token="YOUR_TOKEN",
        )
         await client.extraction.get_job_result(
-            job_id="
+            job_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
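Every hunk above is a docstring or usage-example regeneration; no request logic in extraction/client.py changes. Assembled from the hunks, the regenerated create_schema example now reads as a complete snippet (the string values are the generator's placeholders, not real data):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
client.extraction.create_schema(
    name="string",
    data_schema={},
)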
llama_cloud/resources/files/client.py

@@ -53,7 +53,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.get_file(
-            id="
+            id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -92,7 +92,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.delete_file(
-            id="
+            id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -163,13 +163,6 @@ class FilesClient:
             - organization_id: typing.Optional[str].

             - upload_file: typing.IO.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.files.upload_file()
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -196,6 +189,7 @@ class FilesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
+        external_file_id: typing.Optional[str] = OMIT,
         file_size: typing.Optional[int] = OMIT,
         last_modified_at: typing.Optional[dt.datetime] = OMIT,
         resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]] = OMIT,
@@ -212,15 +206,17 @@ class FilesClient:

             - name: str.

-            - file_size: typing.Optional[int].
+            - external_file_id: typing.Optional[str].
+
+            - file_size: typing.Optional[int].

-            - last_modified_at: typing.Optional[dt.datetime].
+            - last_modified_at: typing.Optional[dt.datetime].

-            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]].
+            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]].

-            - permission_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreatePermissionInfoValue]]].
+            - permission_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreatePermissionInfoValue]]].

-            - data_source_id: typing.Optional[str].
+            - data_source_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -228,10 +224,12 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.generate_presigned_url(
-            name="
+            name="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name}
+        if external_file_id is not OMIT:
+            _request["external_file_id"] = external_file_id
         if file_size is not OMIT:
             _request["file_size"] = file_size
         if last_modified_at is not OMIT:
@@ -320,15 +318,15 @@ class FilesClient:

             - url: str. URL of the file to download

-            - proxy_url: typing.Optional[str].
+            - proxy_url: typing.Optional[str].

-            - request_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]].
+            - request_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]].

             - verify_ssl: typing.Optional[bool]. Whether to verify the SSL certificate when downloading the file

             - follow_redirects: typing.Optional[bool]. Whether to follow redirects when downloading the file

-            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateFromUrlResourceInfoValue]]].
+            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateFromUrlResourceInfoValue]]].
         ---
         from llama_cloud.client import LlamaCloud

@@ -336,7 +334,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.upload_file_from_url(
-            url="
+            url="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {"url": url}
@@ -389,7 +387,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.read_file_content(
-            id="
+            id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -428,7 +426,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.list_file_page_screenshots(
-            id="
+            id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -474,7 +472,7 @@ class FilesClient:
             token="YOUR_TOKEN",
         )
         client.files.get_file_page_screenshot(
-            id="
+            id="string",
             page_index=1,
         )
         """
@@ -521,7 +519,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.get_file(
-            id="
+            id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -560,7 +558,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.delete_file(
-            id="
+            id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -631,13 +629,6 @@ class AsyncFilesClient:
             - organization_id: typing.Optional[str].

             - upload_file: typing.IO.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.files.upload_file()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -664,6 +655,7 @@ class AsyncFilesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         name: str,
+        external_file_id: typing.Optional[str] = OMIT,
         file_size: typing.Optional[int] = OMIT,
         last_modified_at: typing.Optional[dt.datetime] = OMIT,
         resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]] = OMIT,
@@ -680,15 +672,17 @@ class AsyncFilesClient:

             - name: str.

-            - file_size: typing.Optional[int].
+            - external_file_id: typing.Optional[str].
+
+            - file_size: typing.Optional[int].

-            - last_modified_at: typing.Optional[dt.datetime].
+            - last_modified_at: typing.Optional[dt.datetime].

-            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]].
+            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateResourceInfoValue]]].

-            - permission_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreatePermissionInfoValue]]].
+            - permission_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreatePermissionInfoValue]]].

-            - data_source_id: typing.Optional[str].
+            - data_source_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -696,10 +690,12 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.generate_presigned_url(
-            name="
+            name="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name}
+        if external_file_id is not OMIT:
+            _request["external_file_id"] = external_file_id
         if file_size is not OMIT:
             _request["file_size"] = file_size
         if last_modified_at is not OMIT:
@@ -788,15 +784,15 @@ class AsyncFilesClient:

             - url: str. URL of the file to download

-            - proxy_url: typing.Optional[str].
+            - proxy_url: typing.Optional[str].

-            - request_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]].
+            - request_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]].

             - verify_ssl: typing.Optional[bool]. Whether to verify the SSL certificate when downloading the file

             - follow_redirects: typing.Optional[bool]. Whether to follow redirects when downloading the file

-            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateFromUrlResourceInfoValue]]].
+            - resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileCreateFromUrlResourceInfoValue]]].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -804,7 +800,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.upload_file_from_url(
-            url="
+            url="string",
         )
         """
         _request: typing.Dict[str, typing.Any] = {"url": url}
@@ -857,7 +853,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.read_file_content(
-            id="
+            id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -896,7 +892,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.list_file_page_screenshots(
-            id="
+            id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -942,7 +938,7 @@ class AsyncFilesClient:
             token="YOUR_TOKEN",
         )
         await client.files.get_file_page_screenshot(
-            id="
+            id="string",
             page_index=1,
         )
         """
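The one functional change in files/client.py is the new optional external_file_id on generate_presigned_url, serialized into the request body only when supplied (the OMIT-sentinel checks in the hunks above). A minimal sketch of passing it, with placeholder values:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
)
client.files.generate_presigned_url(
    name="string",
    # New in 0.1.7; left out of the request body entirely when not given.
    external_file_id="string",
)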
llama_cloud/resources/jobs/client.py (new file)

@@ -0,0 +1,148 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JobsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_jobs_api_v_1_jobs_get(
+        self,
+        *,
+        job_name: typing.Optional[str] = None,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        include_usage_metrics: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> PaginatedJobsHistoryWithMetrics:
+        """
+        Get jobs for a project.
+
+        Parameters:
+            - job_name: typing.Optional[str].
+
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - include_usage_metrics: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.jobs.get_jobs_api_v_1_jobs_get()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "job_name": job_name,
+                    "limit": limit,
+                    "offset": offset,
+                    "include_usage_metrics": include_usage_metrics,
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedJobsHistoryWithMetrics, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncJobsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_jobs_api_v_1_jobs_get(
+        self,
+        *,
+        job_name: typing.Optional[str] = None,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        include_usage_metrics: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> PaginatedJobsHistoryWithMetrics:
+        """
+        Get jobs for a project.
+
+        Parameters:
+            - job_name: typing.Optional[str].
+
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - include_usage_metrics: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.jobs.get_jobs_api_v_1_jobs_get()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "job_name": job_name,
+                    "limit": limit,
+                    "offset": offset,
+                    "include_usage_metrics": include_usage_metrics,
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedJobsHistoryWithMetrics, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)