llama-cloud 0.1.36__py3-none-any.whl → 0.1.38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +20 -4
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +3 -3
- llama_cloud/resources/admin/client.py +57 -0
- llama_cloud/resources/alpha/__init__.py +2 -0
- llama_cloud/resources/alpha/client.py +118 -0
- llama_cloud/resources/beta/client.py +576 -20
- llama_cloud/resources/chat_apps/client.py +32 -8
- llama_cloud/resources/classifier/client.py +139 -11
- llama_cloud/resources/data_sinks/client.py +32 -8
- llama_cloud/resources/data_sources/client.py +32 -8
- llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -0
- llama_cloud/resources/embedding_model_configs/client.py +48 -12
- llama_cloud/resources/files/__init__.py +2 -2
- llama_cloud/resources/files/client.py +189 -113
- llama_cloud/resources/files/types/__init__.py +1 -3
- llama_cloud/resources/jobs/client.py +12 -6
- llama_cloud/resources/llama_extract/client.py +138 -32
- llama_cloud/resources/organizations/client.py +18 -4
- llama_cloud/resources/parsing/client.py +16 -4
- llama_cloud/resources/pipelines/client.py +32 -8
- llama_cloud/resources/projects/client.py +78 -18
- llama_cloud/resources/reports/client.py +126 -30
- llama_cloud/resources/retrievers/client.py +48 -12
- llama_cloud/types/__init__.py +20 -2
- llama_cloud/types/agent_deployment_summary.py +1 -0
- llama_cloud/types/classify_job.py +2 -0
- llama_cloud/types/cloud_jira_data_source_v_2.py +52 -0
- llama_cloud/types/cloud_jira_data_source_v_2_api_version.py +21 -0
- llama_cloud/types/configurable_data_source_names.py +4 -0
- llama_cloud/types/data_source_component.py +2 -0
- llama_cloud/types/data_source_create_component.py +2 -0
- llama_cloud/types/data_source_reader_version_metadata_reader_version.py +9 -1
- llama_cloud/types/file_create.py +41 -0
- llama_cloud/types/{classify_job_with_status.py → file_filter.py} +8 -15
- llama_cloud/types/file_query_response.py +38 -0
- llama_cloud/types/llama_extract_mode_availability.py +37 -0
- llama_cloud/types/llama_extract_mode_availability_status.py +17 -0
- llama_cloud/types/paginated_response_classify_job.py +34 -0
- llama_cloud/types/pipeline_data_source_component.py +2 -0
- llama_cloud/types/usage_response_active_alerts_item.py +4 -0
- {llama_cloud-0.1.36.dist-info → llama_cloud-0.1.38.dist-info}/METADATA +2 -1
- {llama_cloud-0.1.36.dist-info → llama_cloud-0.1.38.dist-info}/RECORD +47 -38
- {llama_cloud-0.1.36.dist-info → llama_cloud-0.1.38.dist-info}/WHEEL +1 -1
- /llama_cloud/{resources/files/types → types}/file_create_permission_info_value.py +0 -0
- /llama_cloud/{resources/files/types → types}/file_create_resource_info_value.py +0 -0
- {llama_cloud-0.1.36.dist-info → llama_cloud-0.1.38.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py
CHANGED
@@ -41,7 +41,6 @@ from .types import (
     ClassifierRule,
     ClassifyJob,
     ClassifyJobResults,
-    ClassifyJobWithStatus,
     ClassifyParsingConfiguration,
     CloudAstraDbVectorStore,
     CloudAzStorageBlobDataSource,
@@ -51,6 +50,8 @@ from .types import (
     CloudDocument,
     CloudDocumentCreate,
     CloudJiraDataSource,
+    CloudJiraDataSourceV2,
+    CloudJiraDataSourceV2ApiVersion,
     CloudMilvusVectorStore,
     CloudMongoDbAtlasVectorSearch,
     CloudNotionPageDataSource,
@@ -141,10 +142,15 @@ from .types import (
     File,
     FileClassification,
     FileCountByStatusResponse,
+    FileCreate,
+    FileCreatePermissionInfoValue,
+    FileCreateResourceInfoValue,
     FileData,
+    FileFilter,
     FileIdPresignedUrl,
     FileParsePublic,
     FilePermissionInfoValue,
+    FileQueryResponse,
     FileResourceInfoValue,
     FileStoreInfoResponse,
     FileStoreInfoResponseStatus,
@@ -184,6 +190,8 @@ from .types import (
     LLamaParseTransformConfig,
     LegacyParseJobConfig,
     LicenseInfoResponse,
+    LlamaExtractModeAvailability,
+    LlamaExtractModeAvailabilityStatus,
     LlamaExtractSettings,
     LlamaIndexCoreBaseLlmsTypesChatMessage,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
@@ -228,6 +236,7 @@ from .types import (
     PaginatedReportResponse,
     PaginatedResponseAgentData,
     PaginatedResponseAggregateGroup,
+    PaginatedResponseClassifyJob,
     PaginatedResponseQuotaConfiguration,
     ParseJobConfig,
     ParseJobConfigPriority,
@@ -392,8 +401,6 @@ from .resources import (
     ExtractStatelessRequestDataSchema,
     ExtractStatelessRequestDataSchemaZeroValue,
     FileCreateFromUrlResourceInfoValue,
-    FileCreatePermissionInfoValue,
-    FileCreateResourceInfoValue,
     PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
     PipelineUpdateEmbeddingConfig_AzureEmbedding,
@@ -408,6 +415,7 @@ from .resources import (
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
     admin,
     agent_deployments,
+    alpha,
     beta,
     chat_apps,
     classifier,
@@ -469,7 +477,6 @@ __all__ = [
     "ClassifierRule",
     "ClassifyJob",
     "ClassifyJobResults",
-    "ClassifyJobWithStatus",
     "ClassifyParsingConfiguration",
     "CloudAstraDbVectorStore",
     "CloudAzStorageBlobDataSource",
@@ -479,6 +486,8 @@ __all__ = [
     "CloudDocument",
     "CloudDocumentCreate",
     "CloudJiraDataSource",
+    "CloudJiraDataSourceV2",
+    "CloudJiraDataSourceV2ApiVersion",
     "CloudMilvusVectorStore",
     "CloudMongoDbAtlasVectorSearch",
     "CloudNotionPageDataSource",
@@ -590,13 +599,16 @@ __all__ = [
     "File",
     "FileClassification",
     "FileCountByStatusResponse",
+    "FileCreate",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
     "FileCreateResourceInfoValue",
     "FileData",
+    "FileFilter",
     "FileIdPresignedUrl",
     "FileParsePublic",
     "FilePermissionInfoValue",
+    "FileQueryResponse",
     "FileResourceInfoValue",
     "FileStoreInfoResponse",
     "FileStoreInfoResponseStatus",
@@ -637,6 +649,8 @@ __all__ = [
     "LegacyParseJobConfig",
     "LicenseInfoResponse",
     "LlamaCloudEnvironment",
+    "LlamaExtractModeAvailability",
+    "LlamaExtractModeAvailabilityStatus",
     "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
@@ -681,6 +695,7 @@ __all__ = [
     "PaginatedReportResponse",
     "PaginatedResponseAgentData",
     "PaginatedResponseAggregateGroup",
+    "PaginatedResponseClassifyJob",
     "PaginatedResponseQuotaConfiguration",
     "ParseJobConfig",
     "ParseJobConfigPriority",
@@ -835,6 +850,7 @@ __all__ = [
     "WebhookConfigurationWebhookEventsItem",
     "admin",
     "agent_deployments",
+    "alpha",
    "beta",
    "chat_apps",
    "classifier",
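With 0.1.38 installed, the names added to __all__ above become importable from the package root. A minimal sketch (illustrative only; the names are taken from the export list above):

from llama_cloud import (
    CloudJiraDataSourceV2,
    FileCreate,
    FileFilter,
    FileQueryResponse,
    LlamaExtractModeAvailability,
    PaginatedResponseClassifyJob,
)
# Note: ClassifyJobWithStatus is no longer exported in 0.1.38 (see the removal above).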
llama_cloud/client.py
CHANGED
@@ -8,6 +8,7 @@ from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
 from .resources.admin.client import AdminClient, AsyncAdminClient
 from .resources.agent_deployments.client import AgentDeploymentsClient, AsyncAgentDeploymentsClient
+from .resources.alpha.client import AlphaClient, AsyncAlphaClient
 from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
 from .resources.classifier.client import AsyncClassifierClient, ClassifierClient
@@ -61,6 +62,7 @@ class LlamaCloud:
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
         self.beta = BetaClient(client_wrapper=self._client_wrapper)
+        self.alpha = AlphaClient(client_wrapper=self._client_wrapper)


 class AsyncLlamaCloud:
@@ -97,6 +99,7 @@ class AsyncLlamaCloud:
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
         self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
+        self.alpha = AsyncAlphaClient(client_wrapper=self._client_wrapper)


 def _get_base_url(*, base_url: typing.Optional[str] = None, environment: LlamaCloudEnvironment) -> str:
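As wired up above, the new alpha resource group sits on the client next to beta. A minimal sketch (placeholder token):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token
assert hasattr(client, "alpha") and hasattr(client, "beta")  # client.alpha is new in 0.1.38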
llama_cloud/resources/__init__.py
CHANGED
@@ -3,6 +3,7 @@
 from . import (
     admin,
     agent_deployments,
+    alpha,
     beta,
     chat_apps,
     classifier,
@@ -33,7 +34,7 @@ from .embedding_model_configs import (
     EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
 )
-from .files import FileCreateFromUrlResourceInfoValue, FileCreatePermissionInfoValue, FileCreateResourceInfoValue
+from .files import FileCreateFromUrlResourceInfoValue
 from .llama_extract import (
     ExtractAgentCreateDataSchema,
     ExtractAgentCreateDataSchemaZeroValue,
@@ -84,8 +85,6 @@ __all__ = [
     "ExtractStatelessRequestDataSchema",
     "ExtractStatelessRequestDataSchemaZeroValue",
     "FileCreateFromUrlResourceInfoValue",
-    "FileCreatePermissionInfoValue",
-    "FileCreateResourceInfoValue",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineUpdateEmbeddingConfig",
     "PipelineUpdateEmbeddingConfig_AzureEmbedding",
@@ -100,6 +99,7 @@ __all__ = [
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
     "admin",
     "agent_deployments",
+    "alpha",
     "beta",
     "chat_apps",
     "classifier",
llama_cloud/resources/admin/client.py
CHANGED
@@ -11,6 +11,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.file_store_info_response import FileStoreInfoResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.license_info_response import LicenseInfoResponse
+from ...types.llama_extract_mode_availability import LlamaExtractModeAvailability

 try:
     import pydantic
@@ -79,6 +80,34 @@ class AdminClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_llamaextract_features(self) -> typing.List[LlamaExtractModeAvailability]:
+        """
+        Get LlamaExtract feature availability based on available models.
+
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.admin.get_llamaextract_features()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llamaextract/features"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[LlamaExtractModeAvailability], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncAdminClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -137,3 +166,31 @@
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_llamaextract_features(self) -> typing.List[LlamaExtractModeAvailability]:
+        """
+        Get LlamaExtract feature availability based on available models.
+
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.admin.get_llamaextract_features()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llamaextract/features"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[LlamaExtractModeAvailability], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
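A minimal usage sketch for the new endpoint, following the docstring example above (placeholder token; the fields of each LlamaExtractModeAvailability item are defined in llama_cloud/types/llama_extract_mode_availability.py, not shown in this diff):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token
features = client.admin.get_llamaextract_features()  # GET api/v1/admin/llamaextract/features
for feature in features:
    print(feature)  # one LlamaExtractModeAvailability entry per returned item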
llama_cloud/resources/alpha/client.py
ADDED
@@ -0,0 +1,118 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.parsing_job import ParsingJob
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AlphaClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def upload_file_v_2(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        configuration: str,
+        file: typing.Optional[str] = OMIT,
+        project_id: typing.Optional[str] = None,
+    ) -> ParsingJob:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - configuration: str.
+
+            - file: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+        """
+        _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
+        if file is not OMIT:
+            _request["file"] = file
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAlphaClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def upload_file_v_2(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        configuration: str,
+        file: typing.Optional[str] = OMIT,
+        project_id: typing.Optional[str] = None,
+    ) -> ParsingJob:
+        """
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - configuration: str.
+
+            - file: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+        """
+        _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
+        if file is not OMIT:
+            _request["file"] = file
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)