llama-cloud 0.0.1__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- llama_cloud/client.py +2 -2
- llama_cloud/resources/api_keys/client.py +16 -16
- llama_cloud/resources/billing/client.py +12 -12
- llama_cloud/resources/component_definitions/client.py +12 -12
- llama_cloud/resources/data_sinks/client.py +24 -24
- llama_cloud/resources/data_sources/client.py +24 -24
- llama_cloud/resources/deprecated/client.py +48 -48
- llama_cloud/resources/evals/client.py +40 -40
- llama_cloud/resources/files/client.py +24 -24
- llama_cloud/resources/parsing/client.py +48 -48
- llama_cloud/resources/pipelines/client.py +223 -152
- llama_cloud/resources/projects/client.py +64 -64
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/RECORD +16 -16
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.1.dist-info → llama_cloud-0.0.2.dist-info}/WHEEL +0 -0
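The expanded diff below covers llama_cloud/resources/pipelines/client.py. Two themes run through it: the 0.0.1 docstring examples were truncated (`from platform.client import` and `client =` with nothing after them) and 0.0.2 completes them with the `LlamaCloud` and `AsyncLlamaCloud` client classes, and the batch document endpoints gain list-typed requests and responses plus a new document-status endpoint. A minimal sketch of the corrected client construction, copying the module path and constructor arguments exactly as they appear in the 0.0.2 docstrings (not independently verified against the published wheel):

# Corrected example as it appears in the 0.0.2 docstrings; "platform.client"
# is the module path the generated docs use, and token/base_url are the two
# constructor arguments shown there.
from platform.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
    base_url="https://yourhost.com/path/to/api",
)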
--- llama_cloud-0.0.1/llama_cloud/resources/pipelines/client.py
+++ llama_cloud-0.0.2/llama_cloud/resources/pipelines/client.py
@@ -18,6 +18,7 @@ from ...types.eval_execution_params import EvalExecutionParams
 from ...types.eval_execution_params_override import EvalExecutionParamsOverride
 from ...types.eval_question_result import EvalQuestionResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.managed_ingestion_status import ManagedIngestionStatus
 from ...types.metadata_filters import MetadataFilters
 from ...types.pipeline import Pipeline
 from ...types.pipeline_create import PipelineCreate
@@ -63,9 +64,9 @@ class PipelinesClient:
         - pipeline_type: typing.Optional[PipelineType].
         ---
         from platform import PipelineType
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -113,9 +114,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -178,9 +179,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -233,9 +234,9 @@ class PipelinesClient:

         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -304,9 +305,9 @@ class PipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -368,9 +369,9 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -401,9 +402,9 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -436,9 +437,9 @@ class PipelinesClient:

         - eval_dataset_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -487,9 +488,9 @@ class PipelinesClient:
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
         from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -539,9 +540,9 @@ class PipelinesClient:

         - eval_dataset_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -582,9 +583,9 @@ class PipelinesClient:

         - eval_dataset_execution_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -620,9 +621,9 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -657,9 +658,9 @@ class PipelinesClient:

         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -694,9 +695,9 @@ class PipelinesClient:

         - file_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -740,9 +741,9 @@ class PipelinesClient:

         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -782,9 +783,9 @@ class PipelinesClient:

         - file_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -818,9 +819,9 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -857,9 +858,9 @@ class PipelinesClient:

         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -896,9 +897,9 @@ class PipelinesClient:

         - data_source_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -935,9 +936,9 @@ class PipelinesClient:

         - data_source_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -998,9 +999,9 @@ class PipelinesClient:
         - query: str. The query to retrieve against.
         ---
         from platform import FilterCondition, MetadataFilters
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1050,9 +1051,9 @@ class PipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1085,9 +1086,9 @@ class PipelinesClient:

         - job_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1127,9 +1128,9 @@ class PipelinesClient:

         - limit: typing.Optional[int].
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1156,28 +1157,26 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def
+    def create_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-
+        Batch create documents for a pipeline.

         Parameters:
         - pipeline_id: str.

-        - request: CloudDocumentCreate.
+        - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import
-        from platform.client import PlatformApi
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        client.pipelines.
+        client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
-            request=
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1190,7 +1189,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1199,28 +1198,26 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def
+    def upsert_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-
+        Batch create or update a document for a pipeline.

         Parameters:
         - pipeline_id: str.

-        - request: CloudDocumentCreate.
+        - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import
-        from platform.client import PlatformApi
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        client.pipelines.
+        client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
-            request=
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1233,7 +1230,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1251,9 +1248,9 @@ class PipelinesClient:

         - document_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1289,9 +1286,9 @@ class PipelinesClient:

         - document_id: str.
         ---
-        from platform.client import
+        from platform.client import LlamaCloud

-        client =
+        client = LlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1318,6 +1315,45 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+        """
+        Return a single document for a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+
+        - document_id: str.
+        ---
+        from platform.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        client.pipelines.get_pipeline_document_status(
+            pipeline_id="string",
+            document_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncPipelinesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
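The hunk above introduces get_pipeline_document_status, a plain GET against api/v1/pipelines/{pipeline_id}/documents/{document_id}/status that returns a ManagedIngestionStatus, and explains the new managed_ingestion_status import at the top of the file. A sketch of checking a document after a batch upload; the diff does not show the members of ManagedIngestionStatus, so the example only prints the value:

from platform.client import LlamaCloud

client = LlamaCloud(
    token="YOUR_TOKEN",
    base_url="https://yourhost.com/path/to/api",
)

# Returns the ManagedIngestionStatus for one document's ingestion; the
# concrete status values are not visible in this diff.
status = client.pipelines.get_pipeline_document_status(
    pipeline_id="string",
    document_id="string",
)
print(status)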
@@ -1341,9 +1377,9 @@ class AsyncPipelinesClient:
         - pipeline_type: typing.Optional[PipelineType].
         ---
         from platform import PipelineType
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1391,9 +1427,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1456,9 +1492,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1511,9 +1547,9 @@ class AsyncPipelinesClient:

         - with_managed_ingestion_status: typing.Optional[bool].
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1582,9 +1618,9 @@ class AsyncPipelinesClient:
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
         )
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1646,9 +1682,9 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1679,9 +1715,9 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1716,9 +1752,9 @@ class AsyncPipelinesClient:

         - eval_dataset_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1767,9 +1803,9 @@ class AsyncPipelinesClient:
         - params: typing.Optional[EvalExecutionParamsOverride]. The parameters for the eval execution that will override the ones set in the pipeline.
         ---
         from platform import EvalExecutionParamsOverride, SupportedEvalLlmModelNames
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1819,9 +1855,9 @@ class AsyncPipelinesClient:

         - eval_dataset_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1862,9 +1898,9 @@ class AsyncPipelinesClient:

         - eval_dataset_execution_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1900,9 +1936,9 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1937,9 +1973,9 @@ class AsyncPipelinesClient:

         - request: typing.List[PipelineFileCreate].
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -1974,9 +2010,9 @@ class AsyncPipelinesClient:

         - file_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2020,9 +2056,9 @@ class AsyncPipelinesClient:

         - custom_metadata: typing.Optional[typing.Dict[str, PipelineFileUpdateCustomMetadataValue]]. Custom metadata for the file
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2062,9 +2098,9 @@ class AsyncPipelinesClient:

         - file_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2098,9 +2134,9 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2137,9 +2173,9 @@ class AsyncPipelinesClient:

         - request: typing.List[PipelineDataSourceCreate].
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2176,9 +2212,9 @@ class AsyncPipelinesClient:

         - data_source_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2215,9 +2251,9 @@ class AsyncPipelinesClient:

         - data_source_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2278,9 +2314,9 @@ class AsyncPipelinesClient:
         - query: str. The query to retrieve against.
         ---
         from platform import FilterCondition, MetadataFilters
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2330,9 +2366,9 @@ class AsyncPipelinesClient:
         Parameters:
         - pipeline_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2365,9 +2401,9 @@ class AsyncPipelinesClient:

         - job_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2407,9 +2443,9 @@ class AsyncPipelinesClient:

         - limit: typing.Optional[int].
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2436,28 +2472,26 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def
+    async def create_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-
+        Batch create documents for a pipeline.

         Parameters:
         - pipeline_id: str.

-        - request: CloudDocumentCreate.
+        - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import
-        from platform.client import AsyncPlatformApi
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        await client.pipelines.
+        await client.pipelines.create_batch_pipeline_documents(
             pipeline_id="string",
-            request=
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2470,7 +2504,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -2479,28 +2513,26 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def
+    async def upsert_batch_pipeline_documents(
+        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
+    ) -> typing.List[CloudDocument]:
         """
-
+        Batch create or update a document for a pipeline.

         Parameters:
         - pipeline_id: str.

-        - request: CloudDocumentCreate.
+        - request: typing.List[CloudDocumentCreate].
         ---
-        from platform import
-        from platform.client import AsyncPlatformApi
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
-        await client.pipelines.
+        await client.pipelines.upsert_batch_pipeline_documents(
             pipeline_id="string",
-            request=
-                text="string",
-                metadata={"string": {}},
-            ),
+            request=[],
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -2513,7 +2545,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
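The async client receives the same batch fixes: create_batch_pipeline_documents and upsert_batch_pipeline_documents accept and return lists and must be awaited. A sketch of the upsert path under the same assumptions as the sync example above:

import asyncio

from platform import CloudDocumentCreate  # assumed export, as in the sync sketch
from platform.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )
    # Create-or-update in one round trip; returns the resulting list of CloudDocument.
    documents = await client.pipelines.upsert_batch_pipeline_documents(
        pipeline_id="string",
        request=[CloudDocumentCreate(text="string", metadata={"string": {}})],
    )
    print(len(documents))


asyncio.run(main())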
@@ -2531,9 +2563,9 @@ class AsyncPipelinesClient:

         - document_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2569,9 +2601,9 @@ class AsyncPipelinesClient:

         - document_id: str.
         ---
-        from platform.client import
+        from platform.client import AsyncLlamaCloud

-        client =
+        client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
             base_url="https://yourhost.com/path/to/api",
         )
@@ -2597,3 +2629,42 @@ class AsyncPipelinesClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatus:
+        """
+        Return a single document for a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+
+        - document_id: str.
+        ---
+        from platform.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        await client.pipelines.get_pipeline_document_status(
+            pipeline_id="string",
+            document_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatus, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
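Finally, the async form of the new status endpoint, mirroring the sync sketch above; again the module path and signature are taken from the diff, not verified against the published wheel:

import asyncio

from platform.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(
        token="YOUR_TOKEN",
        base_url="https://yourhost.com/path/to/api",
    )
    status = await client.pipelines.get_pipeline_document_status(
        pipeline_id="string",
        document_id="string",
    )
    print(status)  # ManagedIngestionStatus for this document


asyncio.run(main())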