llama-cloud 0.1.25__py3-none-any.whl → 0.1.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +28 -2
- llama_cloud/client.py +6 -0
- llama_cloud/resources/__init__.py +4 -0
- llama_cloud/resources/beta/client.py +14 -2
- llama_cloud/resources/llama_apps/__init__.py +2 -0
- llama_cloud/resources/llama_apps/client.py +160 -0
- llama_cloud/resources/llama_extract/client.py +129 -0
- llama_cloud/resources/parsing/client.py +8 -0
- llama_cloud/resources/responses/__init__.py +2 -0
- llama_cloud/resources/responses/client.py +137 -0
- llama_cloud/types/__init__.py +24 -2
- llama_cloud/types/agent_deployment_list.py +32 -0
- llama_cloud/types/agent_deployment_summary.py +38 -0
- llama_cloud/types/app_schema_chat_chat_message.py +2 -2
- llama_cloud/types/app_schema_responses_message_role.py +33 -0
- llama_cloud/types/cloud_google_drive_data_source.py +1 -3
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/extract_config_priority.py +29 -0
- llama_cloud/types/extract_models.py +8 -0
- llama_cloud/types/extract_schema_generate_response.py +38 -0
- llama_cloud/types/extract_schema_generate_response_data_schema_value.py +7 -0
- llama_cloud/types/input_message.py +2 -2
- llama_cloud/types/legacy_parse_job_config.py +1 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
- llama_cloud/types/{message_role.py → llama_index_core_base_llms_types_message_role.py} +9 -9
- llama_cloud/types/llama_parse_parameters.py +3 -0
- llama_cloud/types/llama_parse_parameters_priority.py +29 -0
- llama_cloud/types/message.py +38 -0
- llama_cloud/types/metadata_filter.py +1 -1
- llama_cloud/types/model_configuration.py +39 -0
- llama_cloud/types/parse_job_config.py +3 -0
- llama_cloud/types/parse_job_config_priority.py +29 -0
- llama_cloud/types/text_content_block.py +34 -0
- {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/METADATA +3 -2
- {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/RECORD +37 -22
- {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/WHEEL +1 -1
- {llama_cloud-0.1.25.dist-info → llama_cloud-0.1.27.dist-info}/LICENSE +0 -0
llama_cloud/resources/llama_extract/client.py
CHANGED

@@ -15,6 +15,7 @@ from ...types.extract_job import ExtractJob
 from ...types.extract_job_create import ExtractJobCreate
 from ...types.extract_resultset import ExtractResultset
 from ...types.extract_run import ExtractRun
+from ...types.extract_schema_generate_response import ExtractSchemaGenerateResponse
 from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.llama_extract_settings import LlamaExtractSettings

@@ -97,6 +98,7 @@ class LlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -108,6 +110,7 @@ class LlamaExtractClient:
         client.llama_extract.create_extraction_agent(
             name="string",
             config=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -168,6 +171,58 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def generate_extraction_schema(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        prompt: typing.Optional[str] = OMIT,
+        file_id: typing.Optional[str] = OMIT,
+    ) -> ExtractSchemaGenerateResponse:
+        """
+        Generates an extraction agent's schema definition from a file and/or natural language prompt.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - prompt: typing.Optional[str].
+
+            - file_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.generate_extraction_schema()
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if prompt is not OMIT:
+            _request["prompt"] = prompt
+        if file_id is not OMIT:
+            _request["file_id"] = file_id
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/generate"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractSchemaGenerateResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_extraction_agent_by_name(
         self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> ExtractAgent:

@@ -253,6 +308,7 @@ class LlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -264,6 +320,7 @@ class LlamaExtractClient:
         client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
             config=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -362,6 +419,7 @@ class LlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractJobCreate,
             ExtractMode,
             ExtractTarget,

@@ -376,6 +434,7 @@ class LlamaExtractClient:
             extraction_agent_id="string",
             file_id="string",
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -450,12 +509,14 @@ class LlamaExtractClient:
             ChunkMode,
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractJobCreate,
             ExtractMode,
             ExtractTarget,
             FailPageMode,
             LlamaExtractSettings,
             LlamaParseParameters,
+            LlamaParseParametersPriority,
             ParsingMode,
         )
         from llama_cloud.client import LlamaCloud

@@ -468,6 +529,7 @@ class LlamaExtractClient:
             extraction_agent_id="string",
             file_id="string",
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -476,6 +538,7 @@ class LlamaExtractClient:
             extract_settings=LlamaExtractSettings(
                 chunk_mode=ChunkMode.PAGE,
                 llama_parse_params=LlamaParseParameters(
+                    priority=LlamaParseParametersPriority.LOW,
                     parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
                     replace_failed_page_mode=FailPageMode.RAW_TEXT,
                 ),

@@ -573,6 +636,7 @@ class LlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -585,6 +649,7 @@ class LlamaExtractClient:
             extraction_agent_id="string",
             file_ids=[],
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -892,6 +957,7 @@ class AsyncLlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -903,6 +969,7 @@ class AsyncLlamaExtractClient:
         await client.llama_extract.create_extraction_agent(
             name="string",
             config=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -963,6 +1030,58 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def generate_extraction_schema(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        prompt: typing.Optional[str] = OMIT,
+        file_id: typing.Optional[str] = OMIT,
+    ) -> ExtractSchemaGenerateResponse:
+        """
+        Generates an extraction agent's schema definition from a file and/or natural language prompt.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - prompt: typing.Optional[str].
+
+            - file_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.generate_extraction_schema()
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if prompt is not OMIT:
+            _request["prompt"] = prompt
+        if file_id is not OMIT:
+            _request["file_id"] = file_id
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/schema/generate"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExtractSchemaGenerateResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_extraction_agent_by_name(
         self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> ExtractAgent:

@@ -1048,6 +1167,7 @@ class AsyncLlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -1059,6 +1179,7 @@ class AsyncLlamaExtractClient:
         await client.llama_extract.update_extraction_agent(
             extraction_agent_id="string",
             config=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -1157,6 +1278,7 @@ class AsyncLlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractJobCreate,
             ExtractMode,
             ExtractTarget,

@@ -1171,6 +1293,7 @@ class AsyncLlamaExtractClient:
             extraction_agent_id="string",
             file_id="string",
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -1245,12 +1368,14 @@ class AsyncLlamaExtractClient:
             ChunkMode,
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractJobCreate,
             ExtractMode,
             ExtractTarget,
             FailPageMode,
             LlamaExtractSettings,
             LlamaParseParameters,
+            LlamaParseParametersPriority,
             ParsingMode,
         )
         from llama_cloud.client import AsyncLlamaCloud

@@ -1263,6 +1388,7 @@ class AsyncLlamaExtractClient:
             extraction_agent_id="string",
             file_id="string",
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,

@@ -1271,6 +1397,7 @@ class AsyncLlamaExtractClient:
             extract_settings=LlamaExtractSettings(
                 chunk_mode=ChunkMode.PAGE,
                 llama_parse_params=LlamaParseParameters(
+                    priority=LlamaParseParametersPriority.LOW,
                     parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
                     replace_failed_page_mode=FailPageMode.RAW_TEXT,
                 ),

@@ -1368,6 +1495,7 @@ class AsyncLlamaExtractClient:
         from llama_cloud import (
             DocumentChunkMode,
             ExtractConfig,
+            ExtractConfigPriority,
             ExtractMode,
             ExtractTarget,
         )

@@ -1380,6 +1508,7 @@ class AsyncLlamaExtractClient:
             extraction_agent_id="string",
             file_ids=[],
             config_override=ExtractConfig(
+                priority=ExtractConfigPriority.LOW,
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
                 chunk_mode=DocumentChunkMode.PAGE,
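Taken together, these hunks add a schema-generation endpoint and a priority knob on ExtractConfig. A minimal usage sketch: the method signature and the ExtractConfigPriority usage come straight from the diff above, while the `data_schema` attribute on the response is an assumption inferred from the new ExtractSchemaGenerateResponseDataSchemaValue type.

from llama_cloud import ExtractConfig, ExtractConfigPriority
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Generate a JSON schema from a natural-language prompt; a file_id can be
# passed instead of (or alongside) the prompt per the signature above.
response = client.llama_extract.generate_extraction_schema(
    prompt="Extract the invoice number, total amount, and due date.",
)
print(response.data_schema)  # assumed field name, see note above

# The new priority field, exactly as the updated docstring examples set it:
config = ExtractConfig(priority=ExtractConfigPriority.LOW)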
llama_cloud/resources/parsing/client.py
CHANGED

@@ -221,6 +221,7 @@ class ParsingClient:
         do_not_unroll_columns: bool,
         extract_charts: bool,
         guess_xlsx_sheet_name: bool,
+        high_res_ocr: bool,
         html_make_all_elements_visible: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,

@@ -339,6 +340,8 @@ class ParsingClient:
 
             - guess_xlsx_sheet_name: bool.
 
+            - high_res_ocr: bool.
+
             - html_make_all_elements_visible: bool.
 
             - html_remove_fixed_elements: bool.

@@ -486,6 +489,7 @@ class ParsingClient:
                 "do_not_unroll_columns": do_not_unroll_columns,
                 "extract_charts": extract_charts,
                 "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
+                "high_res_ocr": high_res_ocr,
                 "html_make_all_elements_visible": html_make_all_elements_visible,
                 "html_remove_fixed_elements": html_remove_fixed_elements,
                 "html_remove_navigation_elements": html_remove_navigation_elements,

@@ -1251,6 +1255,7 @@ class AsyncParsingClient:
         do_not_unroll_columns: bool,
         extract_charts: bool,
         guess_xlsx_sheet_name: bool,
+        high_res_ocr: bool,
         html_make_all_elements_visible: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,

@@ -1369,6 +1374,8 @@ class AsyncParsingClient:
 
             - guess_xlsx_sheet_name: bool.
 
+            - high_res_ocr: bool.
+
             - html_make_all_elements_visible: bool.
 
             - html_remove_fixed_elements: bool.

@@ -1516,6 +1523,7 @@ class AsyncParsingClient:
                 "do_not_unroll_columns": do_not_unroll_columns,
                 "extract_charts": extract_charts,
                 "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
+                "high_res_ocr": high_res_ocr,
                 "html_make_all_elements_visible": html_make_all_elements_visible,
                 "html_remove_fixed_elements": html_remove_fixed_elements,
                 "html_remove_navigation_elements": html_remove_navigation_elements,
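The parsing change is one new boolean, high_res_ocr, threaded through both the sync and async signatures and into the request body. The enclosing method's name is not visible in these hunks, so no call is sketched; the snippet below only illustrates how the flag rides in the JSON payload the client builds, mirroring the "high_res_ocr": high_res_ocr line above (the neighboring keys are taken from the same hunks, the payload itself is an assumed minimal subset).

from llama_cloud.core.jsonable_encoder import jsonable_encoder

body = jsonable_encoder(
    {
        "guess_xlsx_sheet_name": False,
        "high_res_ocr": True,  # new in 0.1.27: request high-resolution OCR
        "html_make_all_elements_visible": False,
    }
)
print(body)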
llama_cloud/resources/responses/client.py
ADDED

@@ -0,0 +1,137 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.message import Message
+from ...types.model_configuration import ModelConfiguration
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ResponsesClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def generate_response(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        messages: typing.List[Message],
+        model_configuration: ModelConfiguration,
+    ) -> typing.Any:
+        """
+        EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - messages: typing.List[Message]. List of messages in the conversation
+
+            - model_configuration: ModelConfiguration. Configuration for the model to use in the response
+        ---
+        from llama_cloud import ModelConfiguration, SupportedLlmModelNames
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.responses.generate_response(
+            messages=[],
+            model_configuration=ModelConfiguration(
+                model_name=SupportedLlmModelNames.GPT_4_O,
+            ),
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncResponsesClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def generate_response(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        messages: typing.List[Message],
+        model_configuration: ModelConfiguration,
+    ) -> typing.Any:
+        """
+        EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - messages: typing.List[Message]. List of messages in the conversation
+
+            - model_configuration: ModelConfiguration. Configuration for the model to use in the response
+        ---
+        from llama_cloud import ModelConfiguration, SupportedLlmModelNames
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.responses.generate_response(
+            messages=[],
+            model_configuration=ModelConfiguration(
+                model_name=SupportedLlmModelNames.GPT_4_O,
+            ),
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
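A usage sketch for the new endpoint, lifted directly from the docstring example in the file above (messages is left empty for brevity; a real call would pass instances of the new Message type, and the endpoint is flagged EXPERIMENTAL):

from llama_cloud import ModelConfiguration, SupportedLlmModelNames
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Returns typing.Any: the parsed response body, per the signature above.
result = client.responses.generate_response(
    messages=[],
    model_configuration=ModelConfiguration(
        model_name=SupportedLlmModelNames.GPT_4_O,
    ),
)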
llama_cloud/types/__init__.py
CHANGED
@@ -15,7 +15,10 @@ from .advanced_mode_transform_config_segmentation_config import (
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
 )
+from .agent_deployment_list import AgentDeploymentList
+from .agent_deployment_summary import AgentDeploymentSummary
 from .app_schema_chat_chat_message import AppSchemaChatChatMessage
+from .app_schema_responses_message_role import AppSchemaResponsesMessageRole
 from .audio_block import AudioBlock
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding

@@ -108,6 +111,7 @@ from .eval_execution_params import EvalExecutionParams
 from .extract_agent import ExtractAgent
 from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
 from .extract_config import ExtractConfig
+from .extract_config_priority import ExtractConfigPriority
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
 from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride

@@ -125,6 +129,8 @@ from .extract_run_data_item_value import ExtractRunDataItemValue
 from .extract_run_data_schema_value import ExtractRunDataSchemaValue
 from .extract_run_data_zero_value import ExtractRunDataZeroValue
 from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
+from .extract_schema_generate_response import ExtractSchemaGenerateResponse
+from .extract_schema_generate_response_data_schema_value import ExtractSchemaGenerateResponseDataSchemaValue
 from .extract_schema_validate_response import ExtractSchemaValidateResponse
 from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState

@@ -175,19 +181,22 @@ from .llama_index_core_base_llms_types_chat_message_blocks_item import (
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
 )
+from .llama_index_core_base_llms_types_message_role import LlamaIndexCoreBaseLlmsTypesMessageRole
 from .llama_parse_parameters import LlamaParseParameters
+from .llama_parse_parameters_priority import LlamaParseParametersPriority
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
 from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
 from .load_files_job_config import LoadFilesJobConfig
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
+from .message import Message
 from .message_annotation import MessageAnnotation
-from .message_role import MessageRole
 from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
+from .model_configuration import ModelConfiguration
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig

@@ -207,6 +216,7 @@ from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
 from .paginated_report_response import PaginatedReportResponse
 from .parse_job_config import ParseJobConfig
+from .parse_job_config_priority import ParseJobConfigPriority
 from .parse_plan_level import ParsePlanLevel
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem

@@ -324,6 +334,7 @@ from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
 from .text_block import TextBlock
+from .text_content_block import TextContentBlock
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore

@@ -355,7 +366,10 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Element",
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
+    "AgentDeploymentList",
+    "AgentDeploymentSummary",
     "AppSchemaChatChatMessage",
+    "AppSchemaResponsesMessageRole",
     "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",

@@ -444,6 +458,7 @@ __all__ = [
     "ExtractAgent",
     "ExtractAgentDataSchemaValue",
     "ExtractConfig",
+    "ExtractConfigPriority",
     "ExtractJob",
     "ExtractJobCreate",
     "ExtractJobCreateDataSchemaOverride",

@@ -461,6 +476,8 @@ __all__ = [
     "ExtractRunDataSchemaValue",
     "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaGenerateResponse",
+    "ExtractSchemaGenerateResponseDataSchemaValue",
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",

@@ -507,19 +524,22 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
+    "LlamaIndexCoreBaseLlmsTypesMessageRole",
     "LlamaParseParameters",
+    "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
     "LlmModelData",
     "LlmParameters",
     "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
+    "Message",
     "MessageAnnotation",
-    "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
+    "ModelConfiguration",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",

@@ -539,6 +559,7 @@ __all__ = [
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
     "ParseJobConfig",
+    "ParseJobConfigPriority",
     "ParsePlanLevel",
     "ParserLanguages",
     "ParsingHistoryItem",

@@ -646,6 +667,7 @@ __all__ = [
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
+    "TextContentBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
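Note the breaking rename buried in these hunks: message_role.py is now llama_index_core_base_llms_types_message_role.py, and "MessageRole" drops out of __all__ while "LlamaIndexCoreBaseLlmsTypesMessageRole" takes its place. A hedged migration sketch, assuming (as the __all__ change indicates) the new name is re-exported at the package top level:

# 0.1.25:
# from llama_cloud import MessageRole
# 0.1.27: the enum follows the module rename
from llama_cloud import LlamaIndexCoreBaseLlmsTypesMessageRole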
llama_cloud/types/agent_deployment_list.py
ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .agent_deployment_summary import AgentDeploymentSummary
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class AgentDeploymentList(pydantic.BaseModel):
+    deployments: typing.List[AgentDeploymentSummary] = pydantic.Field(description="List of deployments")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}