llama-cloud 0.1.40__py3-none-any.whl → 0.1.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic; consult the package registry's advisory page for more details.

Files changed (60)
  1. llama_cloud/__init__.py +18 -72
  2. llama_cloud/client.py +2 -5
  3. llama_cloud/resources/__init__.py +0 -4
  4. llama_cloud/resources/alpha/client.py +14 -30
  5. llama_cloud/resources/beta/client.py +1791 -344
  6. llama_cloud/resources/llama_extract/client.py +48 -0
  7. llama_cloud/resources/organizations/client.py +18 -4
  8. llama_cloud/resources/parsing/client.py +56 -0
  9. llama_cloud/resources/pipelines/client.py +164 -0
  10. llama_cloud/types/__init__.py +18 -72
  11. llama_cloud/types/agent_data.py +1 -1
  12. llama_cloud/types/agent_deployment_summary.py +1 -2
  13. llama_cloud/types/{report_create_response.py → api_key.py} +14 -2
  14. llama_cloud/types/{edit_suggestion.py → api_key_query_response.py} +6 -6
  15. llama_cloud/types/api_key_type.py +17 -0
  16. llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py} +1 -1
  17. llama_cloud/types/extract_config.py +8 -2
  18. llama_cloud/types/extract_models.py +28 -28
  19. llama_cloud/types/legacy_parse_job_config.py +3 -0
  20. llama_cloud/types/llama_extract_mode_availability.py +4 -3
  21. llama_cloud/types/llama_extract_settings.py +1 -1
  22. llama_cloud/types/llama_parse_parameters.py +7 -0
  23. llama_cloud/types/organization.py +1 -0
  24. llama_cloud/types/{progress_event.py → parse_configuration.py} +12 -12
  25. llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py} +9 -7
  26. llama_cloud/types/{report_update_event.py → parse_configuration_filter.py} +8 -6
  27. llama_cloud/types/{report_state_event.py → parse_configuration_query_response.py} +6 -6
  28. llama_cloud/types/parse_job_config.py +7 -0
  29. llama_cloud/types/pipeline_create.py +1 -1
  30. llama_cloud/types/playground_session.py +2 -2
  31. llama_cloud/types/public_model_name.py +97 -0
  32. llama_cloud/types/quota_configuration_configuration_type.py +4 -0
  33. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/RECORD +36 -58
  35. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/WHEEL +1 -1
  36. llama_cloud/resources/reports/__init__.py +0 -5
  37. llama_cloud/resources/reports/client.py +0 -1230
  38. llama_cloud/resources/reports/types/__init__.py +0 -7
  39. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +0 -25
  40. llama_cloud/types/audio_block.py +0 -34
  41. llama_cloud/types/document_block.py +0 -35
  42. llama_cloud/types/edit_suggestion_blocks_item.py +0 -8
  43. llama_cloud/types/image_block.py +0 -35
  44. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +0 -56
  45. llama_cloud/types/paginated_report_response.py +0 -35
  46. llama_cloud/types/progress_event_status.py +0 -33
  47. llama_cloud/types/report.py +0 -33
  48. llama_cloud/types/report_block.py +0 -35
  49. llama_cloud/types/report_block_dependency.py +0 -29
  50. llama_cloud/types/report_event_item.py +0 -40
  51. llama_cloud/types/report_event_item_event_data.py +0 -45
  52. llama_cloud/types/report_event_type.py +0 -37
  53. llama_cloud/types/report_metadata.py +0 -43
  54. llama_cloud/types/report_plan.py +0 -36
  55. llama_cloud/types/report_plan_block.py +0 -36
  56. llama_cloud/types/report_query.py +0 -33
  57. llama_cloud/types/report_response.py +0 -41
  58. llama_cloud/types/report_state.py +0 -37
  59. llama_cloud/types/text_block.py +0 -31
  60. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py CHANGED
@@ -16,7 +16,9 @@ from .types import (
16
16
  AgentDeploymentList,
17
17
  AgentDeploymentSummary,
18
18
  AggregateGroup,
19
- AudioBlock,
19
+ ApiKey,
20
+ ApiKeyQueryResponse,
21
+ ApiKeyType,
20
22
  AutoTransformConfig,
21
23
  AzureOpenAiEmbedding,
22
24
  AzureOpenAiEmbeddingConfig,
@@ -36,6 +38,7 @@ from .types import (
36
38
  ChatApp,
37
39
  ChatAppResponse,
38
40
  ChatData,
41
+ ChatMessage,
39
42
  ChunkMode,
40
43
  ClassificationResult,
41
44
  ClassifierRule,
@@ -85,11 +88,8 @@ from .types import (
85
88
  DataSourceReaderVersionMetadataReaderVersion,
86
89
  DataSourceUpdateDispatcherConfig,
87
90
  DeleteParams,
88
- DocumentBlock,
89
91
  DocumentChunkMode,
90
92
  DocumentIngestionJobParams,
91
- EditSuggestion,
92
- EditSuggestionBlocksItem,
93
93
  ElementSegmentationConfig,
94
94
  EmbeddingModelConfig,
95
95
  EmbeddingModelConfigEmbeddingConfig,
@@ -171,7 +171,6 @@ from .types import (
171
171
  HuggingFaceInferenceApiEmbedding,
172
172
  HuggingFaceInferenceApiEmbeddingConfig,
173
173
  HuggingFaceInferenceApiEmbeddingToken,
174
- ImageBlock,
175
174
  IngestionErrorResponse,
176
175
  InputMessage,
177
176
  JobNameMapping,
@@ -195,12 +194,6 @@ from .types import (
195
194
  LlamaExtractModeAvailability,
196
195
  LlamaExtractModeAvailabilityStatus,
197
196
  LlamaExtractSettings,
198
- LlamaIndexCoreBaseLlmsTypesChatMessage,
199
- LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
200
- LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
201
- LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
202
- LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
203
- LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
204
197
  LlamaParseParameters,
205
198
  LlamaParseParametersPriority,
206
199
  LlamaParseSupportedFileExtensions,
@@ -235,11 +228,14 @@ from .types import (
235
228
  PaginatedJobsHistoryWithMetrics,
236
229
  PaginatedListCloudDocumentsResponse,
237
230
  PaginatedListPipelineFilesResponse,
238
- PaginatedReportResponse,
239
231
  PaginatedResponseAgentData,
240
232
  PaginatedResponseAggregateGroup,
241
233
  PaginatedResponseClassifyJob,
242
234
  PaginatedResponseQuotaConfiguration,
235
+ ParseConfiguration,
236
+ ParseConfigurationCreate,
237
+ ParseConfigurationFilter,
238
+ ParseConfigurationQueryResponse,
243
239
  ParseJobConfig,
244
240
  ParseJobConfigPriority,
245
241
  ParsePlanLevel,
@@ -307,11 +303,10 @@ from .types import (
307
303
  PresetRetrievalParams,
308
304
  PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
309
305
  PresignedUrl,
310
- ProgressEvent,
311
- ProgressEventStatus,
312
306
  Project,
313
307
  ProjectCreate,
314
308
  PromptConf,
309
+ PublicModelName,
315
310
  QuotaConfiguration,
316
311
  QuotaConfigurationConfigurationType,
317
312
  QuotaConfigurationStatus,
@@ -322,24 +317,6 @@ from .types import (
322
317
  RecurringCreditGrant,
323
318
  RelatedNodeInfo,
324
319
  RelatedNodeInfoNodeType,
325
- Report,
326
- ReportBlock,
327
- ReportBlockDependency,
328
- ReportCreateResponse,
329
- ReportEventItem,
330
- ReportEventItemEventData,
331
- ReportEventItemEventData_Progress,
332
- ReportEventItemEventData_ReportBlockUpdate,
333
- ReportEventItemEventData_ReportStateUpdate,
334
- ReportEventType,
335
- ReportMetadata,
336
- ReportPlan,
337
- ReportPlanBlock,
338
- ReportQuery,
339
- ReportResponse,
340
- ReportState,
341
- ReportStateEvent,
342
- ReportUpdateEvent,
343
320
  RetrievalMode,
344
321
  RetrieveResults,
345
322
  Retriever,
@@ -353,13 +330,11 @@ from .types import (
353
330
  SentenceChunkingConfig,
354
331
  SparseModelConfig,
355
332
  SparseModelType,
356
- SrcAppSchemaChatChatMessage,
357
333
  StatusEnum,
358
334
  StructMode,
359
335
  StructParseConf,
360
336
  SupportedLlmModel,
361
337
  SupportedLlmModelNames,
362
- TextBlock,
363
338
  TextNode,
364
339
  TextNodeRelationshipsValue,
365
340
  TextNodeWithScore,
@@ -418,7 +393,6 @@ from .resources import (
418
393
  PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
419
394
  PipelineUpdateTransformConfig,
420
395
  RetrievalParamsSearchFiltersInferenceSchemaValue,
421
- UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
422
396
  admin,
423
397
  agent_deployments,
424
398
  alpha,
@@ -436,7 +410,6 @@ from .resources import (
436
410
  parsing,
437
411
  pipelines,
438
412
  projects,
439
- reports,
440
413
  retrievers,
441
414
  users,
442
415
  )
@@ -458,7 +431,9 @@ __all__ = [
458
431
  "AgentDeploymentList",
459
432
  "AgentDeploymentSummary",
460
433
  "AggregateGroup",
461
- "AudioBlock",
434
+ "ApiKey",
435
+ "ApiKeyQueryResponse",
436
+ "ApiKeyType",
462
437
  "AutoTransformConfig",
463
438
  "AzureOpenAiEmbedding",
464
439
  "AzureOpenAiEmbeddingConfig",
@@ -478,6 +453,7 @@ __all__ = [
478
453
  "ChatApp",
479
454
  "ChatAppResponse",
480
455
  "ChatData",
456
+ "ChatMessage",
481
457
  "ChunkMode",
482
458
  "ClassificationResult",
483
459
  "ClassifierRule",
@@ -530,11 +506,8 @@ __all__ = [
530
506
  "DataSourceUpdateCustomMetadataValue",
531
507
  "DataSourceUpdateDispatcherConfig",
532
508
  "DeleteParams",
533
- "DocumentBlock",
534
509
  "DocumentChunkMode",
535
510
  "DocumentIngestionJobParams",
536
- "EditSuggestion",
537
- "EditSuggestionBlocksItem",
538
511
  "ElementSegmentationConfig",
539
512
  "EmbeddingModelConfig",
540
513
  "EmbeddingModelConfigCreateEmbeddingConfig",
@@ -635,7 +608,6 @@ __all__ = [
635
608
  "HuggingFaceInferenceApiEmbedding",
636
609
  "HuggingFaceInferenceApiEmbeddingConfig",
637
610
  "HuggingFaceInferenceApiEmbeddingToken",
638
- "ImageBlock",
639
611
  "IngestionErrorResponse",
640
612
  "InputMessage",
641
613
  "JobNameMapping",
@@ -660,12 +632,6 @@ __all__ = [
660
632
  "LlamaExtractModeAvailability",
661
633
  "LlamaExtractModeAvailabilityStatus",
662
634
  "LlamaExtractSettings",
663
- "LlamaIndexCoreBaseLlmsTypesChatMessage",
664
- "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
665
- "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
666
- "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
667
- "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
668
- "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
669
635
  "LlamaParseParameters",
670
636
  "LlamaParseParametersPriority",
671
637
  "LlamaParseSupportedFileExtensions",
@@ -700,11 +666,14 @@ __all__ = [
700
666
  "PaginatedJobsHistoryWithMetrics",
701
667
  "PaginatedListCloudDocumentsResponse",
702
668
  "PaginatedListPipelineFilesResponse",
703
- "PaginatedReportResponse",
704
669
  "PaginatedResponseAgentData",
705
670
  "PaginatedResponseAggregateGroup",
706
671
  "PaginatedResponseClassifyJob",
707
672
  "PaginatedResponseQuotaConfiguration",
673
+ "ParseConfiguration",
674
+ "ParseConfigurationCreate",
675
+ "ParseConfigurationFilter",
676
+ "ParseConfigurationQueryResponse",
708
677
  "ParseJobConfig",
709
678
  "ParseJobConfigPriority",
710
679
  "ParsePlanLevel",
@@ -782,11 +751,10 @@ __all__ = [
782
751
  "PresetRetrievalParams",
783
752
  "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
784
753
  "PresignedUrl",
785
- "ProgressEvent",
786
- "ProgressEventStatus",
787
754
  "Project",
788
755
  "ProjectCreate",
789
756
  "PromptConf",
757
+ "PublicModelName",
790
758
  "QuotaConfiguration",
791
759
  "QuotaConfigurationConfigurationType",
792
760
  "QuotaConfigurationStatus",
@@ -797,24 +765,6 @@ __all__ = [
797
765
  "RecurringCreditGrant",
798
766
  "RelatedNodeInfo",
799
767
  "RelatedNodeInfoNodeType",
800
- "Report",
801
- "ReportBlock",
802
- "ReportBlockDependency",
803
- "ReportCreateResponse",
804
- "ReportEventItem",
805
- "ReportEventItemEventData",
806
- "ReportEventItemEventData_Progress",
807
- "ReportEventItemEventData_ReportBlockUpdate",
808
- "ReportEventItemEventData_ReportStateUpdate",
809
- "ReportEventType",
810
- "ReportMetadata",
811
- "ReportPlan",
812
- "ReportPlanBlock",
813
- "ReportQuery",
814
- "ReportResponse",
815
- "ReportState",
816
- "ReportStateEvent",
817
- "ReportUpdateEvent",
818
768
  "RetrievalMode",
819
769
  "RetrievalParamsSearchFiltersInferenceSchemaValue",
820
770
  "RetrieveResults",
@@ -829,19 +779,16 @@ __all__ = [
829
779
  "SentenceChunkingConfig",
830
780
  "SparseModelConfig",
831
781
  "SparseModelType",
832
- "SrcAppSchemaChatChatMessage",
833
782
  "StatusEnum",
834
783
  "StructMode",
835
784
  "StructParseConf",
836
785
  "SupportedLlmModel",
837
786
  "SupportedLlmModelNames",
838
- "TextBlock",
839
787
  "TextNode",
840
788
  "TextNodeRelationshipsValue",
841
789
  "TextNodeWithScore",
842
790
  "TokenChunkingConfig",
843
791
  "UnprocessableEntityError",
844
- "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
845
792
  "UpdateUserResponse",
846
793
  "UsageAndPlan",
847
794
  "UsageMetricResponse",
@@ -877,7 +824,6 @@ __all__ = [
877
824
  "parsing",
878
825
  "pipelines",
879
826
  "projects",
880
- "reports",
881
827
  "retrievers",
882
828
  "users",
883
829
  ]
llama_cloud/client.py CHANGED
@@ -23,7 +23,6 @@ from .resources.organizations.client import AsyncOrganizationsClient, Organizati
23
23
  from .resources.parsing.client import AsyncParsingClient, ParsingClient
24
24
  from .resources.pipelines.client import AsyncPipelinesClient, PipelinesClient
25
25
  from .resources.projects.client import AsyncProjectsClient, ProjectsClient
26
- from .resources.reports.client import AsyncReportsClient, ReportsClient
27
26
  from .resources.retrievers.client import AsyncRetrieversClient, RetrieversClient
28
27
  from .resources.users.client import AsyncUsersClient, UsersClient
29
28
 
@@ -43,6 +42,7 @@ class LlamaCloud:
43
42
  token=token,
44
43
  httpx_client=httpx.Client(timeout=timeout) if httpx_client is None else httpx_client,
45
44
  )
45
+ self.agent_deployments = AgentDeploymentsClient(client_wrapper=self._client_wrapper)
46
46
  self.data_sinks = DataSinksClient(client_wrapper=self._client_wrapper)
47
47
  self.data_sources = DataSourcesClient(client_wrapper=self._client_wrapper)
48
48
  self.embedding_model_configs = EmbeddingModelConfigsClient(client_wrapper=self._client_wrapper)
@@ -55,12 +55,10 @@ class LlamaCloud:
55
55
  self.evals = EvalsClient(client_wrapper=self._client_wrapper)
56
56
  self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
57
57
  self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
58
- self.agent_deployments = AgentDeploymentsClient(client_wrapper=self._client_wrapper)
59
58
  self.classifier = ClassifierClient(client_wrapper=self._client_wrapper)
60
59
  self.admin = AdminClient(client_wrapper=self._client_wrapper)
61
60
  self.users = UsersClient(client_wrapper=self._client_wrapper)
62
61
  self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
63
- self.reports = ReportsClient(client_wrapper=self._client_wrapper)
64
62
  self.beta = BetaClient(client_wrapper=self._client_wrapper)
65
63
  self.alpha = AlphaClient(client_wrapper=self._client_wrapper)
66
64
 
@@ -80,6 +78,7 @@ class AsyncLlamaCloud:
80
78
  token=token,
81
79
  httpx_client=httpx.AsyncClient(timeout=timeout) if httpx_client is None else httpx_client,
82
80
  )
81
+ self.agent_deployments = AsyncAgentDeploymentsClient(client_wrapper=self._client_wrapper)
83
82
  self.data_sinks = AsyncDataSinksClient(client_wrapper=self._client_wrapper)
84
83
  self.data_sources = AsyncDataSourcesClient(client_wrapper=self._client_wrapper)
85
84
  self.embedding_model_configs = AsyncEmbeddingModelConfigsClient(client_wrapper=self._client_wrapper)
@@ -92,12 +91,10 @@ class AsyncLlamaCloud:
92
91
  self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
93
92
  self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
94
93
  self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
95
- self.agent_deployments = AsyncAgentDeploymentsClient(client_wrapper=self._client_wrapper)
96
94
  self.classifier = AsyncClassifierClient(client_wrapper=self._client_wrapper)
97
95
  self.admin = AsyncAdminClient(client_wrapper=self._client_wrapper)
98
96
  self.users = AsyncUsersClient(client_wrapper=self._client_wrapper)
99
97
  self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
100
- self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
101
98
  self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
102
99
  self.alpha = AsyncAlphaClient(client_wrapper=self._client_wrapper)
103
100
 
@@ -18,7 +18,6 @@ from . import (
18
18
  parsing,
19
19
  pipelines,
20
20
  projects,
21
- reports,
22
21
  retrievers,
23
22
  users,
24
23
  )
@@ -60,7 +59,6 @@ from .pipelines import (
60
59
  PipelineUpdateTransformConfig,
61
60
  RetrievalParamsSearchFiltersInferenceSchemaValue,
62
61
  )
63
- from .reports import UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction
64
62
 
65
63
  __all__ = [
66
64
  "DataSinkUpdateComponent",
@@ -96,7 +94,6 @@ __all__ = [
96
94
  "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
97
95
  "PipelineUpdateTransformConfig",
98
96
  "RetrievalParamsSearchFiltersInferenceSchemaValue",
99
- "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
100
97
  "admin",
101
98
  "agent_deployments",
102
99
  "alpha",
@@ -114,7 +111,6 @@ __all__ = [
114
111
  "parsing",
115
112
  "pipelines",
116
113
  "projects",
117
- "reports",
118
114
  "retrievers",
119
115
  "users",
120
116
  ]
@@ -6,7 +6,6 @@ from json.decoder import JSONDecodeError
6
6
 
7
7
  from ...core.api_error import ApiError
8
8
  from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
- from ...core.jsonable_encoder import jsonable_encoder
10
9
  from ...core.remove_none_from_dict import remove_none_from_dict
11
10
  from ...errors.unprocessable_entity_error import UnprocessableEntityError
12
11
  from ...types.http_validation_error import HttpValidationError
@@ -20,40 +19,31 @@ try:
20
19
  except ImportError:
21
20
  import pydantic # type: ignore
22
21
 
23
- # this is used as the default value for optional parameters
24
- OMIT = typing.cast(typing.Any, ...)
25
-
26
22
 
27
23
  class AlphaClient:
28
24
  def __init__(self, *, client_wrapper: SyncClientWrapper):
29
25
  self._client_wrapper = client_wrapper
30
26
 
31
27
  def upload_file_v_2(
32
- self,
33
- *,
34
- project_id: typing.Optional[str] = None,
35
- organization_id: typing.Optional[str] = None,
36
- configuration: str,
37
- file: typing.Optional[str] = OMIT,
28
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
38
29
  ) -> ParsingJob:
39
30
  """
40
31
  Parameters:
41
32
  - project_id: typing.Optional[str].
42
33
 
43
34
  - organization_id: typing.Optional[str].
35
+ ---
36
+ from llama_cloud.client import LlamaCloud
44
37
 
45
- - configuration: str.
46
-
47
- - file: typing.Optional[str].
38
+ client = LlamaCloud(
39
+ token="YOUR_TOKEN",
40
+ )
41
+ client.alpha.upload_file_v_2()
48
42
  """
49
- _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
50
- if file is not OMIT:
51
- _request["file"] = file
52
43
  _response = self._client_wrapper.httpx_client.request(
53
44
  "POST",
54
45
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
55
46
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
56
- json=jsonable_encoder(_request),
57
47
  headers=self._client_wrapper.get_headers(),
58
48
  timeout=60,
59
49
  )
@@ -73,31 +63,25 @@ class AsyncAlphaClient:
73
63
  self._client_wrapper = client_wrapper
74
64
 
75
65
  async def upload_file_v_2(
76
- self,
77
- *,
78
- project_id: typing.Optional[str] = None,
79
- organization_id: typing.Optional[str] = None,
80
- configuration: str,
81
- file: typing.Optional[str] = OMIT,
66
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
82
67
  ) -> ParsingJob:
83
68
  """
84
69
  Parameters:
85
70
  - project_id: typing.Optional[str].
86
71
 
87
72
  - organization_id: typing.Optional[str].
73
+ ---
74
+ from llama_cloud.client import AsyncLlamaCloud
88
75
 
89
- - configuration: str.
90
-
91
- - file: typing.Optional[str].
76
+ client = AsyncLlamaCloud(
77
+ token="YOUR_TOKEN",
78
+ )
79
+ await client.alpha.upload_file_v_2()
92
80
  """
93
- _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
94
- if file is not OMIT:
95
- _request["file"] = file
96
81
  _response = await self._client_wrapper.httpx_client.request(
97
82
  "POST",
98
83
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
99
84
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
100
- json=jsonable_encoder(_request),
101
85
  headers=self._client_wrapper.get_headers(),
102
86
  timeout=60,
103
87
  )